Mike Hankinson
April 21, 2022
#Mounting the drive
from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
#Importing libraries required to load the data
import zipfile
import os
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, BatchNormalization, Dropout, Flatten, LeakyReLU, GlobalAvgPool2D
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers
#to ignore warnings
import warnings
warnings.filterwarnings('ignore')
# Remove the limit from the number of displayed columns and rows. It helps to see the entire dataframe while printing it
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", 200)
Note:
#Storing the path of the data file from the Google drive
path = '/content/drive/Othercomputers/My Laptop/!Mike_Sync/!MIT_Applied_Data_Science/7_Capstone_Project/cell_images.zip'
#The data is provided as a zip file so we need to extract the files from the zip file
with zipfile.ZipFile(path, 'r') as zip_ref:
zip_ref.extractall()
The extracted folder has different folders for train and test data which further contains the different sizes of images for parasitized and uninfected cells within the respective folder name.
The size of all images must be the same and should be converted to 4D arrays so that they can be used as an input for the convolutional neural network. Also, we need to create the labels for both types of images to be able to train and test the model.
Let's do the same for the training data first and then we will use the same code for the test data as well.
#Storing the path of the extracted "train" folder
train_dir = '/content/cell_images/train'
#Size of image so that each image has the same size
SIZE = 64
#Empty list to store the training images after they are converted to NumPy arrays
train_images = []
#Empty list to store the training labels (0 - uninfected, 1 - parasitized)
train_labels = []
#We will run the same code for "parasitized" as well as "uninfected" folders within the "train" folder
for folder_name in ['/parasitized/', '/uninfected/']:
#Path of the folder
images_path = os.listdir(train_dir + folder_name)
for i, image_name in enumerate(images_path):
try:
#Opening each image using the path of that image
image = Image.open(train_dir + folder_name + image_name)
#Resizing each image to (64,64)
image = image.resize((SIZE, SIZE))
#Converting images to arrays and appending that array to the empty list defined above
train_images.append(np.array(image))
#Creating labels for parasitized and uninfected images
if folder_name=='/parasitized/':
train_labels.append(1)
else:
train_labels.append(0)
except Exception:
pass
#Converting lists to arrays
train_images = np.array(train_images)
train_labels = np.array(train_labels)
print(train_labels[:10])
print(train_images[:1])
[1 1 1 1 1 1 1 1 1 1] [[[[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] ... [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]]]]
#Storing the path of the extracted "test" folder
test_dir = '/content/cell_images/test/'
#Size of image so that each image has the same size (it must be same as the train image size)
SIZE = 64
#Empty list to store the testing images after they are converted to NumPy arrays
test_images = []
#Empty list to store the testing labels (0 - uninfected, 1 - parasitized)
test_labels = []
#We will run the same code for "parasitized" as well as "uninfected" folders within the "test" folder
for folder_name in ['/parasitized/', '/uninfected/']:
#Path of the folder
images_path = os.listdir(test_dir + folder_name)
for i, image_name in enumerate(images_path):
try:
#Opening each image using the path of that image
image = Image.open(test_dir + folder_name + image_name)
#Resizing each image to (64,64)
image = image.resize((SIZE, SIZE))
#Converting images to arrays and appending that array to the empty list defined above
test_images.append(np.array(image))
#Creating labels for parasitized and uninfected images
if folder_name=='/parasitized/':
test_labels.append(1)
else:
test_labels.append(0)
except Exception:
pass
#Converting lists to arrays
test_images = np.array(test_images)
test_labels = np.array(test_labels)
print(test_labels[:10])
print(test_images[:1])
[1 1 1 1 1 1 1 1 1 1] [[[[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] ... [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]] [[0 0 0] [0 0 0] [0 0 0] ... [0 0 0] [0 0 0] [0 0 0]]]]
# shape of images
print("Shape train_images:", train_images[0].shape)
print("Shape test_images:", test_images[0].shape)
Shape train_images: (64, 64, 3) Shape test_images: (64, 64, 3)
# shape of labels
print("Shape train_labels:", train_labels.shape)
print("Shape test_labels:", test_labels.shape)
print()
print()
print("Count train_labels:", len(train_labels))
print("Count test_labels:", len(test_labels))
Shape train_labels: (24958,) Shape test_labels: (2600,) Count train_labels: 24958 Count test_labels: 2600
# try to use min and max function from numpy
print("train_images maximum pixels:", np.amax(train_images), " train_images minimum pixels:", np.amin(train_images))
print()
print("test_images maximum pixels:", np.amax(test_images), " test_images minimum pixels:", np.amin(test_images))
train_images maximum pixels: 255 train_images minimum pixels: 0 test_images maximum pixels: 255 test_images minimum pixels: 0
# try to use value_counts to count the values
print("Count of parasitized train data:",np.count_nonzero(train_labels == 1, axis=0))
print("Count of uninfected train data:",np.count_nonzero(train_labels == 0, axis=0))
print()
print("Count of parasitized test data:",np.count_nonzero(test_labels == 1, axis=0))
print("Count of uninfected test data:",np.count_nonzero(test_labels == 0, axis=0))
Count of parasitized train data: 12582 Count of uninfected train data: 12376 Count of parasitized test data: 1300 Count of uninfected test data: 1300
# try to normalize the train and test images by dividing it by 255 and convert them to float32 using astype function
train_images = (train_images/255).astype('float32')
test_images = (test_images/255).astype('float32')
| Train Data | Test Data | |
|---|---|---|
| Count of Parasitized | 12,582 | 1,300 |
| Count of Uninfected | 12,376 | 1,300 |
# you are free to use bar plot or pie-plot or count plot, etc. to plot the labels of train and test data and check if they are balanced
import matplotlib.pyplot as plt
import seaborn as sns
#define data
train_para = np.count_nonzero(train_labels == 1, axis=0)
train_uninfect = np.count_nonzero(train_labels == 0, axis=0)
data = [train_para, train_uninfect]
labels = ['Uninfected', 'Parasitized']
explode = (0, 0.1) # only "explode" the 2nd slice
#define Seaborn color palette to use
colors = sns.color_palette('pastel')#[0:1]
#create pie chart
plt.pie(data, labels = labels, colors = colors, autopct='%1.1f%%', shadow=True, explode=explode)
plt.show()
print(train_uninfect)
print(train_para)
12376 12582
#define data
test_para = np.count_nonzero(test_labels == 1, axis=0)
test_uninfect = np.count_nonzero(test_labels == 0, axis=0)
data = [test_para, test_uninfect]
labels = ['Uninfected', 'Parasitized']
explode = (0, 0.1) # only "explode" the 2nd slice
#define Seaborn color palette to use
colors = sns.color_palette('pastel')#[0:1]
#create pie chart
plt.pie(data, labels = labels, colors = colors, autopct='%1.1f%%', shadow=True, explode=explode)
plt.show()
Let's visualize the images from the train data
# This code will help you in visualizing both the parasitized and uninfected images
np.random.seed(42)
plt.figure(1 , figsize = (16 , 16))
for n in range(1, 17):
plt.subplot(4, 4, n)
index = int(np.random.randint(0, train_images.shape[0], 1))
if train_labels[index] == 1:
plt.title('parasitized')
else:
plt.title('uninfected')
plt.imshow(train_images[index])
plt.axis('off')
# Hint: Have a keen look into the number of iterations that the for loop should iterate
# This code will help you in visualizing both the parasitized and uninfected images
np.random.seed(42)
plt.figure(1 , figsize = (12 , 12))
for n in range(1, 37):
plt.subplot(6, 6, n)
index = int(np.random.randint(0, train_images.shape[0], 1))
if train_labels[index] == 1:
plt.title('parasitized')
else:
plt.title('uninfected')
plt.imshow(train_images[index])
plt.axis('off')
# function to find the mean
def find_mean_img(full_mat, title):
# calculate the average
mean_img = np.mean(full_mat, axis = 0)[0]
# reshape it back to a matrix
plt.imshow(mean_img)
plt.title(f'Average {title}')
plt.axis('off')
plt.show()
return mean_img
Mean image for parasitized
# If the label=1 then the image is parasitised and if the label=0 then the image is uninfected
parasitized_data=[] # Create a list to store the parasitized data
for img, label in zip(train_images, train_labels):
if label==1:
parasitized_data.append([img])
parasitized_mean = find_mean_img(np.array(parasitized_data), 'Parasitized') # find the mean
Mean image for uninfected
# Similarly write the code to find the mean image of uninfected
uninfected_data=[] # Create a list to store the uninfected data
for img, label in zip(train_images, train_labels):
if label==0:
uninfected_data.append([img])
uninfected_mean = find_mean_img(np.array(uninfected_data), 'Uninfected') # find the mean
"In RGB, we cannot separate color information from luminance. HSV (Hue, Saturation, Value) is used to separate image luminance from color information."
Image citation: Wikipedia link text
import cv2
gfx=[] # to hold the HSV image array
for i in np.arange(0,2000,1):
a=cv2.cvtColor(train_images[i],cv2.COLOR_BGR2HSV)
gfx.append(a)
gfx=np.array(gfx)
viewimage=np.random.randint(1,2000,500)
fig,ax=plt.subplots(1,6,figsize=(18,18))
for t,i in zip(range(6),viewimage):
if test_labels[i] == 1:
Title='Parasitized'
else:
Title ='Uninfected'
ax[t].set_title(Title)
ax[t].imshow(gfx[i])
ax[t].set_axis_off()
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
# Similarly you can visualize for the images in the test data
gfx_test=[] # to hold the HSV image array
for i in np.arange(0,2000,1):
a=cv2.cvtColor(test_images[i],cv2.COLOR_BGR2HSV)
gfx_test.append(a)
gfx_test=np.array(gfx_test)
viewimage=np.random.randint(1,2000,500)
fig,ax=plt.subplots(1,6,figsize=(18,18))
for t,i in zip(range(6),viewimage):
if test_labels[i] == 1:
Title='Parasitized'
else:
Title ='Uninfected'
ax[t].set_title(Title)
ax[t].imshow(gfx_test[i])
ax[t].set_axis_off()
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
"In image processing, a Gaussian blur (also known as Gaussian smoothing) is the result of blurring an image by a Gaussian function (named after mathematician and scientist Carl Friedrich Gauss).
It is a widely used effect in graphics software, typically to reduce image noise and reduce detail."
Reference: Wikipedia link text
gbx=[] ## to hold the blurred images
for i in np.arange(0,2000,1):
b= cv2.GaussianBlur(train_images[i], (5, 5), 0)
gbx.append(b)
gbx=np.array(gbx)
viewimage=np.random.randint(1,2000,500)
fig,ax=plt.subplots(1,6,figsize=(18,18))
for t,i in zip(range(6),viewimage):
if test_labels[i] == 1:
Title='Parasitized'
else:
Title ='Uninfected'
ax[t].set_title(Title)
ax[t].imshow(gbx[i])
ax[t].set_axis_off()
fig.tight_layout()
# Similarly you can apply Gaussian blurring for the images in the test data
gbx_test=[] ## to hold the blurred images
for i in np.arange(0, 2000, 1):
b= cv2.GaussianBlur(test_images[i], (5, 5), 0)
gbx_test.append(b)
gbx_test=np.array(gbx_test)
viewimage=np.random.randint(1,2000, 500)
fig,ax=plt.subplots(1,6,figsize=(18,18))
for t,i in zip(range(6),viewimage):
if test_labels[i] == 1:
Title='Parasitized'
else:
Title ='Uninfected'
ax[t].set_title(Title)
ax[t].imshow(gbx_test[i])
ax[t].set_axis_off()
fig.tight_layout()
Think About It: Would blurring help us for this problem statement in any way? What else can we try?
Potential techniques: What different techniques should be explored?
Overall solution design: What is the potential solution design?
#Mounting the drive
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
#Importing libraries required to load the data
import zipfile
import os
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, BatchNormalization, Dropout, Flatten, LeakyReLU, GlobalAvgPool2D
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers
#to ignore warnings
import warnings
warnings.filterwarnings('ignore')
# Remove the limit from the number of displayed columns and rows. It helps to see the entire dataframe while printing it
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", 200)
Note:
#Storing the path of the data file from the Google drive
path = '/content/drive/Othercomputers/My Laptop/!Mike_Sync/!MIT_Applied_Data_Science/7_Capstone_Project/cell_images.zip'
#The data is provided as a zip file so we need to extract the files from the zip file
with zipfile.ZipFile(path, 'r') as zip_ref:
zip_ref.extractall()
The files have been extracted to the local session of Google Colab. The extracted folder would have the following structure:
The extracted folder has different folders for train and test data which further contains the different sizes of images for parasitized and uninfected cells within the respective folder name.
The size of all images must be the same and should be converted to 4D arrays so that they can be used as an input for the convolutional neural network. Also, we need to create the labels for both types of images to be able to train and test the model.
Let's do the same for the training data first and then we will use the same code for the test data as well.
#Storing the path of the extracted "train" folder
train_dir = '/content/cell_images/train'
#Size of image so that each image has the same size
SIZE = 64
#Empty list to store the training images after they are converted to NumPy arrays
train_images = []
#Empty list to store the training labels (0 - uninfected, 1 - parasitized)
train_labels = []
#We will run the same code for "parasitized" as well as "uninfected" folders within the "train" folder
for folder_name in ['/parasitized/', '/uninfected/']:
#Path of the folder
images_path = os.listdir(train_dir + folder_name)
for i, image_name in enumerate(images_path):
try:
#Opening each image using the path of that image
image = Image.open(train_dir + folder_name + image_name)
#Resizing each image to (64,64)
image = image.resize((SIZE, SIZE))
#Converting images to arrays and appending that array to the empty list defined above
train_images.append(np.array(image))
#Creating labels for parasitized and uninfected images
if folder_name=='/parasitized/':
train_labels.append(1)
else:
train_labels.append(0)
except Exception:
pass
#Converting lists to arrays
train_images = np.array(train_images)
train_labels = np.array(train_labels)
#Storing the path of the extracted "test" folder
test_dir = '/content/cell_images/test'
#Size of image so that each image has the same size (it must be same as the train image size)
SIZE = 64
#Empty list to store the testing images after they are converted to NumPy arrays
test_images = []
#Empty list to store the testing labels (0 - uninfected, 1 - parasitized)
test_labels = []
#We will run the same code for "parasitized" as well as "uninfected" folders within the "test" folder
for folder_name in ['/parasitized/', '/uninfected/']:
#Path of the folder
images_path = os.listdir(test_dir + folder_name)
for i, image_name in enumerate(images_path):
try:
#Opening each image using the path of that image
image = Image.open(test_dir + folder_name + image_name)
#Resizing each image to (64,64)
image = image.resize((SIZE, SIZE))
#Converting images to arrays and appending that array to the empty list defined above
test_images.append(np.array(image))
#Creating labels for parasitized and uninfected images
if folder_name=='/parasitized/':
test_labels.append(1)
else:
test_labels.append(0)
except Exception:
pass
#Converting lists to arrays
test_images = np.array(test_images)
test_labels = np.array(test_labels)
# try to normalize the train and test images by dividing it by 255 and convert them to float32 using astype function
train_images = (train_images/255).astype('float32')
test_images = (test_images/255).astype('float32')
As we have done our preprocessing required and performed some EDA to gain some insights in our Milestone-1 so now we will try to build our model and try evaluating its performance.
# Encoding Train Labels
train_labels=to_categorical(train_labels,2)
# Similarly let us try to encode test labels
test_labels=to_categorical(test_labels,2)
# print(train_labels)
#print(test_labels)
# One Hot Encoding - Alternative
# using this utility function - https://www.tensorflow.org/api_docs/python/tf/keras/utils/to_categorical
#train_labels = tf.keras.utils.to_categorical(train_labels)
#test_labels = tf.keras.utils.to_categorical(test_labels)
#train_labels
# function to plot train and validation accuracy
def plot_accuracy(history):
N = len(history.history["accuracy"])
plt.figure(figsize=(7,7))
plt.plot(np.arange(0, N), history.history["accuracy"], label="train_accuracy", ls='--')
plt.plot(np.arange(0, N), history.history["val_accuracy"], label="val_accuracy", ls='--')
plt.title("Accuracy vs Epoch")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(loc="upper left")
Note: The Base Model has been fully built and evaluated with all outputs shown to give an idea about the process of the creation and evaluation of the performance of a CNN architecture. A similar process can be followed in iterating to build better-performing CNN architectures.
#Clearing backend
from tensorflow.keras import backend
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from random import shuffle
backend.clear_session()
#Fixing the seed for random number generators so that we can ensure we receive the same output everytime
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
# Creating sequential model
model=Sequential()
# First Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding and input shape of 64*64*3
model.add(Conv2D(filters=32,kernel_size=2,padding="same",activation="relu",input_shape=(64,64,3)))
# max-pooling layer with a pool size of 2
model.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model.add(Dropout(0.2))
# Second Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model.add(Conv2D(filters=32,kernel_size=2,padding="same",activation="relu"))
# max-pooling layer with a pool size of 2
model.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model.add(Dropout(0.2))
# Third Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model.add(Conv2D(filters=32,kernel_size=2,padding="same",activation="relu"))
# max-pooling layer with a pool size of 2
model.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model.add(Dropout(0.2))
# Flatten the output from the previous layer
model.add(Flatten())
# Output layer with nodes equal to the number of classes and softmax activation
model.add(Dense(512,activation="relu"))
model.add(Dropout(0.4))
model.add(Dense(2,activation="softmax")) #2 represent output layer neurons
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 64, 64, 32) 416
max_pooling2d (MaxPooling2D (None, 32, 32, 32) 0
)
dropout (Dropout) (None, 32, 32, 32) 0
conv2d_1 (Conv2D) (None, 32, 32, 32) 4128
max_pooling2d_1 (MaxPooling (None, 16, 16, 32) 0
2D)
dropout_1 (Dropout) (None, 16, 16, 32) 0
conv2d_2 (Conv2D) (None, 16, 16, 32) 4128
max_pooling2d_2 (MaxPooling (None, 8, 8, 32) 0
2D)
dropout_2 (Dropout) (None, 8, 8, 32) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
dropout_3 (Dropout) (None, 512) 0
dense_1 (Dense) (None, 2) 1026
=================================================================
Total params: 1,058,786
Trainable params: 1,058,786
Non-trainable params: 0
_________________________________________________________________
tf.keras.utils.plot_model(
model,
to_file='model.png',
show_shapes=True,
show_dtype=False,
show_layer_names=True,
rankdir='TB',
expand_nested=False,
dpi=96,
layer_range=None,
show_layer_activations=True
)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Using Callbacks
# Callbacks help in saving the checkpoints and stopping at an accuracy where the model does not seem to improve
callbacks = [EarlyStopping(monitor='val_loss', patience=2),
ModelCheckpoint('.mdl_wts.hdf5', monitor='val_loss', save_best_only=True)]
Fit and train our Model
#Fit the model with min batch size as 32 can tune batch size to some factor of 2^power ]
history=model.fit(train_images,train_labels,batch_size=32,callbacks=callbacks,validation_split=0.2,epochs=20,verbose=1)
Epoch 1/20 624/624 [==============================] - 6s 6ms/step - loss: 0.4058 - accuracy: 0.7981 - val_loss: 0.1689 - val_accuracy: 0.9443 Epoch 2/20 624/624 [==============================] - 4s 6ms/step - loss: 0.1249 - accuracy: 0.9583 - val_loss: 0.1208 - val_accuracy: 0.9794 Epoch 3/20 624/624 [==============================] - 4s 6ms/step - loss: 0.0929 - accuracy: 0.9707 - val_loss: 0.0841 - val_accuracy: 0.9842 Epoch 4/20 624/624 [==============================] - 4s 6ms/step - loss: 0.0771 - accuracy: 0.9736 - val_loss: 0.0617 - val_accuracy: 0.9868 Epoch 5/20 624/624 [==============================] - 4s 6ms/step - loss: 0.0729 - accuracy: 0.9748 - val_loss: 0.0848 - val_accuracy: 0.9786 Epoch 6/20 624/624 [==============================] - 4s 6ms/step - loss: 0.0729 - accuracy: 0.9766 - val_loss: 0.0585 - val_accuracy: 0.9846 Epoch 7/20 624/624 [==============================] - 4s 6ms/step - loss: 0.0671 - accuracy: 0.9768 - val_loss: 0.0695 - val_accuracy: 0.9820 Epoch 8/20 624/624 [==============================] - 4s 6ms/step - loss: 0.0647 - accuracy: 0.9770 - val_loss: 0.0628 - val_accuracy: 0.9818
accuracy = model.evaluate(test_images, test_labels, verbose=1)
print('\n', 'Test_Accuracy:-', accuracy[1])
82/82 [==============================] - 0s 4ms/step - loss: 0.0783 - accuracy: 0.9735 Test_Accuracy:- 0.9734615087509155
Plotting the confusion matrix
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
pred = model.predict(test_images)
pred = np.argmax(pred,axis = 1)
y_true = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true,pred))
#Plotting the heatmap using confusion matrix
cm = confusion_matrix(y_true,pred)
plt.figure(figsize=(8,5))
sns.heatmap(cm, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.97 0.98 0.97 1300
1 0.98 0.97 0.97 1300
accuracy 0.97 2600
macro avg 0.97 0.97 0.97 2600
weighted avg 0.97 0.97 0.97 2600
Plotting the train and validation curves
# function to plot train and validation accuracy
def plot_accuracy(history):
N = len(history.history["accuracy"])
plt.figure(figsize=(7,7))
plt.plot(np.arange(0, N), history.history["accuracy"], label="train_accuracy", ls='--')
plt.plot(np.arange(0, N), history.history["val_accuracy"], label="val_accuracy", ls='--')
plt.title("Accuracy vs Epoch")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(loc="upper left")
plot_accuracy(history)
Observations: Base Model
| Test Accuracy | False negative out of 1,300 |
False Positive out of 1,300 |
|
|---|---|---|---|
| Base | 0.97 | 41 | 28 |
So now let's try to build another model with few more add on layers and try to check if we can try to improve the model. Therefore try to build a model by adding few layers if required and altering the activation functions.
#Clearing backend
from tensorflow.keras import backend
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from random import shuffle
backend.clear_session()
#Fixing the seed for random number generators so that we can ensure we receive the same output everytime
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
# Define Activation Function
activation_f = "tanh"
#creating sequential model
model1=Sequential()
# First Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding and input shape of 64*64*3
model1.add(Conv2D(filters=32,kernel_size=2,padding="same",activation=activation_f,input_shape=(64,64,3)))
# max-pooling layer with a pool size of 2
model1.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model1.add(Dropout(0.2))
# Second Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model1.add(Conv2D(filters=32,kernel_size=2,padding="same",activation=activation_f,input_shape=(64,64,3)))
# max-pooling layer with a pool size of 2
model1.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model1.add(Dropout(0.2))
# Third Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model1.add(Conv2D(filters=32,kernel_size=2,padding="same",activation=activation_f,input_shape=(64,64,3)))
# max-pooling layer with a pool size of 2
model1.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model1.add(Dropout(0.2))
# forth Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model1.add(Conv2D(filters=32,kernel_size=2,padding="same",activation=activation_f,input_shape=(64,64,3)))
# max-pooling layer with a pool size of 2
model1.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model1.add(Dropout(0.2))
# Flatten the output from the previous layer
model1.add(Flatten())
#
model1.add(Dense(512, activation = activation_f))
model1.add(Dropout(0.4))
# Output layer with nodes equal to the number of classes and softmax activation
model1.add(Dense(2,activation="softmax")) #2 represent output layer neurons
model1.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 64, 64, 32) 416
max_pooling2d (MaxPooling2D (None, 32, 32, 32) 0
)
dropout (Dropout) (None, 32, 32, 32) 0
conv2d_1 (Conv2D) (None, 32, 32, 32) 4128
max_pooling2d_1 (MaxPooling (None, 16, 16, 32) 0
2D)
dropout_1 (Dropout) (None, 16, 16, 32) 0
conv2d_2 (Conv2D) (None, 16, 16, 32) 4128
max_pooling2d_2 (MaxPooling (None, 8, 8, 32) 0
2D)
dropout_2 (Dropout) (None, 8, 8, 32) 0
conv2d_3 (Conv2D) (None, 8, 8, 32) 4128
max_pooling2d_3 (MaxPooling (None, 4, 4, 32) 0
2D)
dropout_3 (Dropout) (None, 4, 4, 32) 0
flatten (Flatten) (None, 512) 0
dense (Dense) (None, 512) 262656
dropout_4 (Dropout) (None, 512) 0
dense_1 (Dense) (None, 2) 1026
=================================================================
Total params: 276,482
Trainable params: 276,482
Non-trainable params: 0
_________________________________________________________________
model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Using Callbacks
# Callbacks help in saving the checkpoints and stopping at an accuracy where the model does not seem to improve
callbacks = [EarlyStopping(monitor='val_loss', patience=2),
ModelCheckpoint('.mdl_wts.hdf5', monitor='val_loss', save_best_only=True)]
Fit and Train the model
history1=model1.fit(train_images,train_labels,batch_size=32,callbacks=callbacks,validation_split=0.2,epochs=20,verbose=1)
Epoch 1/20 624/624 [==============================] - 5s 6ms/step - loss: 0.4484 - accuracy: 0.7765 - val_loss: 0.0703 - val_accuracy: 0.9599 Epoch 2/20 624/624 [==============================] - 4s 6ms/step - loss: 0.1661 - accuracy: 0.9398 - val_loss: 0.0753 - val_accuracy: 0.9659 Epoch 3/20 624/624 [==============================] - 4s 6ms/step - loss: 0.1367 - accuracy: 0.9507 - val_loss: 0.0930 - val_accuracy: 0.9671
accuracy1 = model1.evaluate(test_images, test_labels, verbose=1)
print('\n', 'Test_Accuracy:-', accuracy1[1])
82/82 [==============================] - 0s 3ms/step - loss: 0.1537 - accuracy: 0.9515 Test_Accuracy:- 0.9515384435653687
Plotting the confusion matrix
pred1 = model1.predict(test_images)
pred1 = np.argmax(pred1,axis = 1)
y_true1 = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true1,pred1))
#Plotting the heatmap using confusion matrix
cm1 = confusion_matrix(y_true1,pred1)
plt.figure(figsize=(8,5))
sns.heatmap(cm1, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.95 0.95 0.95 1300
1 0.95 0.95 0.95 1300
accuracy 0.95 2600
macro avg 0.95 0.95 0.95 2600
weighted avg 0.95 0.95 0.95 2600
Plotting the Train and validation curves
plot_accuracy(history1)
Observations: Model 1
| Test Accuracy | False negative out of 1,300 |
False Positive out of 1,300 |
|
|---|---|---|---|
| Base | 0.97 | 41 | 28 |
| Model 1 | 0.95 | 65 | 61 |
#Clearing backend
from tensorflow.keras import backend
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from random import shuffle
#Clearing backend
from tensorflow.keras import backend
backend.clear_session()
#Fixing the seed for random number generators
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
# Define Activation Function
activation_f = LeakyReLU(0.1)
#creating sequential model
model2=Sequential()
# First Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding and input shape of 64*64*3
model2.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model2.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model2.add(Dropout(0.2))
#BatchNormalization layer
model2.add(BatchNormalization())
# Second Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model2.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model2.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model2.add(Dropout(0.2))
#BatchNormalization layer
model2.add(BatchNormalization())
# Third Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model2.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model2.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model2.add(Dropout(0.2))
#BatchNormalization layer
model2.add(BatchNormalization())
# forth Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model2.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model2.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model2.add(Dropout(0.2))
# Flatten the output from the previous layer
# https://towardsdatascience.com/the-most-intuitive-and-easiest-guide-for-convolutional-neural-network-3607be47480
model2.add(Flatten())
# Hidden Layer 1
model2.add(Dense(512, activation=activation_f))
model2.add(Dropout(0.2))
# Hidden Layer 2
model2.add(Dense(512, activation=activation_f))
model2.add(Dropout(0.1))
# Output layer with nodes equal to the number of classes and softmax activation
model2.add(Dropout(0.1))
model2.add(Dense(2,activation="softmax")) #2 represent output layer neurons
# Define Optimizer
adam = optimizers.Adam(learning_rate=0.001)
model2.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 64, 64, 32) 896
max_pooling2d (MaxPooling2D (None, 32, 32, 32) 0
)
dropout (Dropout) (None, 32, 32, 32) 0
batch_normalization (BatchN (None, 32, 32, 32) 128
ormalization)
conv2d_1 (Conv2D) (None, 32, 32, 32) 9248
max_pooling2d_1 (MaxPooling (None, 16, 16, 32) 0
2D)
dropout_1 (Dropout) (None, 16, 16, 32) 0
batch_normalization_1 (Batc (None, 16, 16, 32) 128
hNormalization)
conv2d_2 (Conv2D) (None, 16, 16, 32) 9248
max_pooling2d_2 (MaxPooling (None, 8, 8, 32) 0
2D)
dropout_2 (Dropout) (None, 8, 8, 32) 0
batch_normalization_2 (Batc (None, 8, 8, 32) 128
hNormalization)
conv2d_3 (Conv2D) (None, 8, 8, 32) 9248
max_pooling2d_3 (MaxPooling (None, 4, 4, 32) 0
2D)
dropout_3 (Dropout) (None, 4, 4, 32) 0
flatten (Flatten) (None, 512) 0
dense (Dense) (None, 512) 262656
dropout_4 (Dropout) (None, 512) 0
dense_1 (Dense) (None, 512) 262656
dropout_5 (Dropout) (None, 512) 0
dropout_6 (Dropout) (None, 512) 0
dense_2 (Dense) (None, 2) 1026
=================================================================
Total params: 555,362
Trainable params: 555,170
Non-trainable params: 192
_________________________________________________________________
model2.compile(loss="binary_crossentropy", optimizer=adam, metrics = ['accuracy'])
Using callbacks
# Callbacks help in saving the checkpoints and stopping at an accuracy where the model does not seem to improve
callbacks = [EarlyStopping(monitor='val_loss', patience=2),
ModelCheckpoint('.mdl_wts.hdf5', monitor='val_loss', save_best_only=True)]
Fit and train the model
history2 = model2.fit(train_images,train_labels,batch_size=32,callbacks=callbacks,validation_split=0.2,epochs=20,verbose=1)
Epoch 1/20 624/624 [==============================] - 5s 7ms/step - loss: 0.2589 - accuracy: 0.8844 - val_loss: 0.0331 - val_accuracy: 0.9954 Epoch 2/20 624/624 [==============================] - 4s 6ms/step - loss: 0.0913 - accuracy: 0.9720 - val_loss: 0.0360 - val_accuracy: 0.9890 Epoch 3/20 624/624 [==============================] - 4s 6ms/step - loss: 0.0830 - accuracy: 0.9759 - val_loss: 0.0624 - val_accuracy: 0.9788
Plotting the train and validation accuracy
accuracy2 = model2.evaluate(test_images, test_labels, verbose=1)
print('\n', 'Test_Accuracy:-', accuracy2[1])
82/82 [==============================] - 0s 4ms/step - loss: 0.0959 - accuracy: 0.9788 Test_Accuracy:- 0.9788461327552795
# plotting the accuracies
plot_accuracy(history2)
Generate the classification report and confusion matrix
pred2 = model2.predict(test_images)
pred2 = np.argmax(pred2,axis = 1)
y_true2 = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true2,pred2))
#Plotting the heatmap using confusion matrix
cm2 = confusion_matrix(y_true2,pred2)
plt.figure(figsize=(8,5))
sns.heatmap(cm2, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.98 0.97 0.98 1300
1 0.97 0.98 0.98 1300
accuracy 0.98 2600
macro avg 0.98 0.98 0.98 2600
weighted avg 0.98 0.98 0.98 2600
Observations: Model 2
| Test Accuracy | False negative out of 1,300 |
False Positive out of 1,300 |
|
|---|---|---|---|
| Base | 0.97 | 41 | 28 |
| Model 1 | 0.95 | 65 | 61 |
| Model 2 | 0.98 | 22 | 33 |
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_images, train_labels, test_size=0.2, random_state=42)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Using ImageDataGenerator to generate images
# Add Noise
def add_noise(img):
VARIABILITY = 0.1 # customize this
deviation = VARIABILITY*np.random.random()
noise = np.random.normal(0, deviation, img.shape)
img += noise
#img = np.clip(img, 0., 255.)
return img
datagen_aug = ImageDataGenerator(
#rescale=1./255,
# customize these and other parameters
rotation_range=45,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0,
zoom_range=0,
#preprocessing_function=add_noise,
horizontal_flip=True,
fill_mode='nearest',
)
val_datagen = ImageDataGenerator()
# Flowing training images using train_datagen generator
train_generator = datagen_aug.flow(x = X_train, y = y_train, batch_size=64, seed=42, shuffle=True)
# Flowing validation images using val_datagen generator
val_generator = val_datagen.flow(x= X_val, y = y_val, batch_size=64, seed=42, shuffle=True)
#Creating an iterable for images and labels from the training data
images, labels = next(train_generator)
#Plotting 16 images from the training data
fig, axes = plt.subplots(4, 4, figsize = (16, 8))
fig.set_size_inches(16, 16)
for (image, label, ax) in zip(images, labels, axes.flatten()):
ax.imshow(image)
if label[1] == 1:
ax.set_title('parasitized')
else:
ax.set_title('uninfected')
ax.axis('off')
#Clearing backend
from tensorflow.keras import backend
backend.clear_session()
#Fixing the seed for random number generators
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
# Define Activation Function
activation_f = LeakyReLU(0.1)
#creating sequential model
model6=Sequential()
# First Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding and input shape of 64*64*3
model6.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model6.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model6.add(Dropout(0.2))
#BatchNormalization layer
model6.add(BatchNormalization())
# Second Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model6.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model6.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model6.add(Dropout(0.2))
#BatchNormalization layer
model6.add(BatchNormalization())
# Third Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model6.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model6.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model6.add(Dropout(0.2))
#BatchNormalization layer
model6.add(BatchNormalization())
# forth Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding
model6.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model6.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model6.add(Dropout(0.2))
# Flatten the output from the previous layer
# https://towardsdatascience.com/the-most-intuitive-and-easiest-guide-for-convolutional-neural-network-3607be47480
model6.add(Flatten())
# Hidden Layer 1
model6.add(Dense(512, activation=activation_f))
model6.add(Dropout(0.2))
# Hidden Layer 2
model6.add(Dense(512, activation=activation_f))
model6.add(Dropout(0.1))
# Output layer with nodes equal to the number of classes and softmax activation
model6.add(Dropout(0.1))
model6.add(Dense(2,activation="softmax")) #2 represent output layer neurons
# Define Optimizer
adam = optimizers.Adam(learning_rate=0.001)
# Compile the Model
model6.compile(loss="binary_crossentropy", optimizer=adam, metrics = ['accuracy'])
model6.summary()
# Callbacks help in saving the checkpoints and stopping at an accuracy where the model does not seem to improve
callbacks = [EarlyStopping(monitor='val_loss', patience=2),
ModelCheckpoint('.mdl_wts.hdf5', monitor='val_loss', save_best_only=True)]
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 64, 64, 32) 896
max_pooling2d (MaxPooling2D (None, 32, 32, 32) 0
)
dropout (Dropout) (None, 32, 32, 32) 0
batch_normalization (BatchN (None, 32, 32, 32) 128
ormalization)
conv2d_1 (Conv2D) (None, 32, 32, 32) 9248
max_pooling2d_1 (MaxPooling (None, 16, 16, 32) 0
2D)
dropout_1 (Dropout) (None, 16, 16, 32) 0
batch_normalization_1 (Batc (None, 16, 16, 32) 128
hNormalization)
conv2d_2 (Conv2D) (None, 16, 16, 32) 9248
max_pooling2d_2 (MaxPooling (None, 8, 8, 32) 0
2D)
dropout_2 (Dropout) (None, 8, 8, 32) 0
batch_normalization_2 (Batc (None, 8, 8, 32) 128
hNormalization)
conv2d_3 (Conv2D) (None, 8, 8, 32) 9248
max_pooling2d_3 (MaxPooling (None, 4, 4, 32) 0
2D)
dropout_3 (Dropout) (None, 4, 4, 32) 0
flatten (Flatten) (None, 512) 0
dense (Dense) (None, 512) 262656
dropout_4 (Dropout) (None, 512) 0
dense_1 (Dense) (None, 512) 262656
dropout_5 (Dropout) (None, 512) 0
dropout_6 (Dropout) (None, 512) 0
dense_2 (Dense) (None, 2) 1026
=================================================================
Total params: 555,362
Trainable params: 555,170
Non-trainable params: 192
_________________________________________________________________
history6 = model6.fit(train_generator,
validation_data=val_generator,
batch_size=32,callbacks=callbacks,
epochs=20,verbose=1)
Epoch 1/20 312/312 [==============================] - 22s 66ms/step - loss: 0.3825 - accuracy: 0.8263 - val_loss: 0.7426 - val_accuracy: 0.5956 Epoch 2/20 312/312 [==============================] - 20s 65ms/step - loss: 0.1706 - accuracy: 0.9439 - val_loss: 0.1155 - val_accuracy: 0.9710 Epoch 3/20 312/312 [==============================] - 20s 65ms/step - loss: 0.1487 - accuracy: 0.9540 - val_loss: 0.0982 - val_accuracy: 0.9802 Epoch 4/20 312/312 [==============================] - 21s 66ms/step - loss: 0.1407 - accuracy: 0.9570 - val_loss: 0.1100 - val_accuracy: 0.9750 Epoch 5/20 312/312 [==============================] - 20s 65ms/step - loss: 0.1360 - accuracy: 0.9577 - val_loss: 0.0855 - val_accuracy: 0.9786 Epoch 6/20 312/312 [==============================] - 21s 67ms/step - loss: 0.1360 - accuracy: 0.9575 - val_loss: 0.0918 - val_accuracy: 0.9780 Epoch 7/20 312/312 [==============================] - 20s 65ms/step - loss: 0.1238 - accuracy: 0.9613 - val_loss: 0.0802 - val_accuracy: 0.9744 Epoch 8/20 312/312 [==============================] - 21s 66ms/step - loss: 0.1246 - accuracy: 0.9600 - val_loss: 0.0645 - val_accuracy: 0.9808 Epoch 9/20 312/312 [==============================] - 20s 65ms/step - loss: 0.1215 - accuracy: 0.9615 - val_loss: 0.0711 - val_accuracy: 0.9778 Epoch 10/20 312/312 [==============================] - 20s 66ms/step - loss: 0.1198 - accuracy: 0.9636 - val_loss: 0.0685 - val_accuracy: 0.9804
# Evaluating the model on test data
accuracy6 = model6.evaluate(test_images, test_labels, verbose=1)
print('\n', 'Test_Accuracy:-', accuracy6[1])
82/82 [==============================] - 0s 4ms/step - loss: 0.0594 - accuracy: 0.9815 Test_Accuracy:- 0.9815384745597839
# plotting the accuracies
plot_accuracy(history6)
pred6 = model6.predict(test_images)
pred6 = np.argmax(pred6,axis = 1)
y_true6 = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true6,pred6))
#Plotting the heatmap using confusion matrix
cm6 = confusion_matrix(y_true6,pred6)
plt.figure(figsize=(8,5))
sns.heatmap(cm6, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.98 0.98 0.98 1300
1 0.98 0.98 0.98 1300
accuracy 0.98 2600
macro avg 0.98 0.98 0.98 2600
weighted avg 0.98 0.98 0.98 2600
Observations: Model 3
| Test Accuracy | False negative out of 1,300 |
False Positive out of 1,300 |
|
|---|---|---|---|
| Base | 0.97 | 41 | 28 |
| Model 1 | 0.95 | 65 | 61 |
| Model 2 | 0.98 | 22 | 33 |
| Model 3 | 0.98 | 23 | 25 |
bold text
#Clearing backend
from tensorflow.keras import backend
backend.clear_session()
#Fixing the seed for random number generators
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
#Feature Learning
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import Model
vgg = VGG16(include_top=False, weights='imagenet', input_shape=(64,64,3))
vgg.summary()
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 64, 64, 3)] 0
block1_conv1 (Conv2D) (None, 64, 64, 64) 1792
block1_conv2 (Conv2D) (None, 64, 64, 64) 36928
block1_pool (MaxPooling2D) (None, 32, 32, 64) 0
block2_conv1 (Conv2D) (None, 32, 32, 128) 73856
block2_conv2 (Conv2D) (None, 32, 32, 128) 147584
block2_pool (MaxPooling2D) (None, 16, 16, 128) 0
block3_conv1 (Conv2D) (None, 16, 16, 256) 295168
block3_conv2 (Conv2D) (None, 16, 16, 256) 590080
block3_conv3 (Conv2D) (None, 16, 16, 256) 590080
block3_pool (MaxPooling2D) (None, 8, 8, 256) 0
block4_conv1 (Conv2D) (None, 8, 8, 512) 1180160
block4_conv2 (Conv2D) (None, 8, 8, 512) 2359808
block4_conv3 (Conv2D) (None, 8, 8, 512) 2359808
block4_pool (MaxPooling2D) (None, 4, 4, 512) 0
block5_conv1 (Conv2D) (None, 4, 4, 512) 2359808
block5_conv2 (Conv2D) (None, 4, 4, 512) 2359808
block5_conv3 (Conv2D) (None, 4, 4, 512) 2359808
block5_pool (MaxPooling2D) (None, 2, 2, 512) 0
=================================================================
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
_________________________________________________________________
transfer_layer = vgg.get_layer('block5_pool')
vgg.trainable=False
# Add classification layers on top of it
x = Flatten()(transfer_layer.output) #Flatten the output from the 5th block of the VGG16 model
x = Dense(256, activation='relu')(x)
# Similarly add a dense layer with 128 neurons
x = Dense(128, activation='relu')(x)
x = Dropout(0.3)(x)
# Add a dense layer with 64 neurons
x = Dense(64, activation='relu')(x)
x = BatchNormalization()(x)
pred = Dense(2, activation='softmax')(x)
model4 = Model(vgg.input, pred) #Initializing the model
model4.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adamax(learning_rate = 0.0005),
metrics=['accuracy'])
using callbacks
# Callbacks help in saving the checkpoints and stopping at an accuracy where the model does not seem to improve
callbacks = [EarlyStopping(monitor='val_loss', patience=2),
ModelCheckpoint('.mdl_wts.hdf5', monitor='val_loss', save_best_only=True)]
Fit and Train the model
#Fitting the model and running the model for 10 epochs
history4 = model4.fit(
train_images, train_labels,
epochs=10,
batch_size=32,
validation_split=0.1,
verbose=2
)
Epoch 1/10 702/702 - 10s - loss: 0.2711 - accuracy: 0.8891 - val_loss: 0.3317 - val_accuracy: 0.8590 - 10s/epoch - 14ms/step Epoch 2/10 702/702 - 9s - loss: 0.1866 - accuracy: 0.9289 - val_loss: 0.1367 - val_accuracy: 0.9515 - 9s/epoch - 12ms/step Epoch 3/10 702/702 - 9s - loss: 0.1705 - accuracy: 0.9360 - val_loss: 0.1483 - val_accuracy: 0.9479 - 9s/epoch - 12ms/step Epoch 4/10 702/702 - 9s - loss: 0.1602 - accuracy: 0.9394 - val_loss: 0.2282 - val_accuracy: 0.9058 - 9s/epoch - 12ms/step Epoch 5/10 702/702 - 9s - loss: 0.1507 - accuracy: 0.9451 - val_loss: 0.3445 - val_accuracy: 0.8610 - 9s/epoch - 12ms/step Epoch 6/10 702/702 - 9s - loss: 0.1458 - accuracy: 0.9453 - val_loss: 0.1505 - val_accuracy: 0.9463 - 9s/epoch - 12ms/step Epoch 7/10 702/702 - 9s - loss: 0.1412 - accuracy: 0.9493 - val_loss: 0.1397 - val_accuracy: 0.9459 - 9s/epoch - 12ms/step Epoch 8/10 702/702 - 9s - loss: 0.1372 - accuracy: 0.9496 - val_loss: 0.1505 - val_accuracy: 0.9475 - 9s/epoch - 12ms/step Epoch 9/10 702/702 - 9s - loss: 0.1326 - accuracy: 0.9517 - val_loss: 0.0958 - val_accuracy: 0.9696 - 9s/epoch - 12ms/step Epoch 10/10 702/702 - 9s - loss: 0.1266 - accuracy: 0.9559 - val_loss: 0.0517 - val_accuracy: 0.9852 - 9s/epoch - 12ms/step
Plot the train and validation accuracy
# plotting the accuracies
plot_accuracy(history4)
accuracy4 = model4.evaluate(test_images, test_labels, verbose=1)
print('\n', 'Test_Accuracy:-', accuracy4[1])
82/82 [==============================] - 1s 12ms/step - loss: 0.1248 - accuracy: 0.9508 Test_Accuracy:- 0.9507692456245422
# Evaluating the model on test data
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
pred4 = model4.predict(test_images)
pred4 = np.argmax(pred4,axis = 1)
y_true4 = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true4,pred4))
#Plotting the heatmap using confusion matrix
cm4 = confusion_matrix(y_true4,pred4)
plt.figure(figsize=(8,5))
sns.heatmap(cm4, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.93 0.98 0.95 1300
1 0.98 0.92 0.95 1300
accuracy 0.95 2600
macro avg 0.95 0.95 0.95 2600
weighted avg 0.95 0.95 0.95 2600
Observations (thus far): Model 4
#Clearing backend
from tensorflow.keras import backend
backend.clear_session()
#Fixing the seed for random number generators
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
model4.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adamax(learning_rate = 0.0005),
metrics=['accuracy'])
df_accuracy = pd.DataFrame()
df_val_accuracy = pd.DataFrame()
i=2
pat=2
for i in range(2, 6):
# Callbacks help in saving the checkpoints and stopping at an accuracy where the model does not seem to improve
callbacks = [EarlyStopping(monitor='val_loss', patience=i),
ModelCheckpoint('.mdl_wts.hdf5', monitor='val_loss', save_best_only=True)]
#Fitting the model and running the model for 20 epochs
history4 = model4.fit(
train_images, train_labels,
epochs=20,
batch_size=32,
validation_split=0.1,
verbose=2
)
#print(history4.history.keys()) https://stackoverflow.com/questions/36952763/how-to-return-history-of-validation-loss-in-keras
df_accuracy = df_accuracy.append(history4.history['accuracy'])
df_val_accuracy = df_val_accuracy.append(history4.history['val_accuracy'])
i += 1
# plotting the accuracies
plot_accuracy(history4)
Epoch 1/20 702/702 - 10s - loss: 0.1262 - accuracy: 0.9536 - val_loss: 0.1815 - val_accuracy: 0.9299 - 10s/epoch - 14ms/step Epoch 2/20 702/702 - 9s - loss: 0.1186 - accuracy: 0.9589 - val_loss: 0.0576 - val_accuracy: 0.9828 - 9s/epoch - 12ms/step Epoch 3/20 702/702 - 9s - loss: 0.1166 - accuracy: 0.9582 - val_loss: 0.2063 - val_accuracy: 0.9267 - 9s/epoch - 12ms/step Epoch 4/20 702/702 - 9s - loss: 0.1119 - accuracy: 0.9598 - val_loss: 0.2169 - val_accuracy: 0.9267 - 9s/epoch - 13ms/step Epoch 5/20 702/702 - 9s - loss: 0.1104 - accuracy: 0.9609 - val_loss: 0.0929 - val_accuracy: 0.9708 - 9s/epoch - 12ms/step Epoch 6/20 702/702 - 9s - loss: 0.1064 - accuracy: 0.9601 - val_loss: 0.1192 - val_accuracy: 0.9567 - 9s/epoch - 13ms/step Epoch 7/20 702/702 - 9s - loss: 0.1036 - accuracy: 0.9623 - val_loss: 0.1867 - val_accuracy: 0.9295 - 9s/epoch - 12ms/step Epoch 8/20 702/702 - 9s - loss: 0.1021 - accuracy: 0.9645 - val_loss: 0.2094 - val_accuracy: 0.9287 - 9s/epoch - 12ms/step Epoch 9/20 702/702 - 9s - loss: 0.0995 - accuracy: 0.9634 - val_loss: 0.0559 - val_accuracy: 0.9808 - 9s/epoch - 12ms/step Epoch 10/20 702/702 - 9s - loss: 0.0944 - accuracy: 0.9668 - val_loss: 0.0620 - val_accuracy: 0.9796 - 9s/epoch - 13ms/step Epoch 11/20 702/702 - 9s - loss: 0.0945 - accuracy: 0.9664 - val_loss: 0.0445 - val_accuracy: 0.9872 - 9s/epoch - 12ms/step Epoch 12/20 702/702 - 9s - loss: 0.0920 - accuracy: 0.9676 - val_loss: 0.1954 - val_accuracy: 0.9319 - 9s/epoch - 12ms/step Epoch 13/20 702/702 - 9s - loss: 0.0899 - accuracy: 0.9688 - val_loss: 0.1092 - val_accuracy: 0.9675 - 9s/epoch - 12ms/step Epoch 14/20 702/702 - 9s - loss: 0.0862 - accuracy: 0.9691 - val_loss: 0.2943 - val_accuracy: 0.8898 - 9s/epoch - 12ms/step Epoch 15/20 702/702 - 9s - loss: 0.0817 - accuracy: 0.9708 - val_loss: 0.1892 - val_accuracy: 0.9383 - 9s/epoch - 12ms/step Epoch 16/20 702/702 - 9s - loss: 0.0791 - accuracy: 0.9725 - val_loss: 0.0951 - val_accuracy: 0.9728 - 9s/epoch - 12ms/step Epoch 17/20 702/702 - 9s - loss: 0.0756 - accuracy: 0.9739 - val_loss: 0.4083 - val_accuracy: 0.8614 - 9s/epoch - 12ms/step Epoch 18/20 702/702 - 9s - loss: 0.0730 - accuracy: 0.9748 - val_loss: 0.0875 - val_accuracy: 0.9716 - 9s/epoch - 12ms/step Epoch 19/20 702/702 - 9s - loss: 0.0741 - accuracy: 0.9744 - val_loss: 0.0331 - val_accuracy: 0.9908 - 9s/epoch - 12ms/step Epoch 20/20 702/702 - 9s - loss: 0.0751 - accuracy: 0.9732 - val_loss: 0.3682 - val_accuracy: 0.8722 - 9s/epoch - 12ms/step Epoch 1/20 702/702 - 9s - loss: 0.0687 - accuracy: 0.9757 - val_loss: 0.0986 - val_accuracy: 0.9708 - 9s/epoch - 12ms/step Epoch 2/20 702/702 - 9s - loss: 0.0659 - accuracy: 0.9771 - val_loss: 0.1018 - val_accuracy: 0.9692 - 9s/epoch - 12ms/step Epoch 3/20 702/702 - 9s - loss: 0.0651 - accuracy: 0.9766 - val_loss: 0.1516 - val_accuracy: 0.9523 - 9s/epoch - 12ms/step Epoch 4/20 702/702 - 9s - loss: 0.0609 - accuracy: 0.9784 - val_loss: 0.2399 - val_accuracy: 0.9203 - 9s/epoch - 12ms/step Epoch 5/20 702/702 - 9s - loss: 0.0590 - accuracy: 0.9795 - val_loss: 0.0983 - val_accuracy: 0.9740 - 9s/epoch - 12ms/step Epoch 6/20 702/702 - 9s - loss: 0.0609 - accuracy: 0.9797 - val_loss: 0.0711 - val_accuracy: 0.9800 - 9s/epoch - 12ms/step Epoch 7/20 702/702 - 9s - loss: 0.0601 - accuracy: 0.9789 - val_loss: 0.2910 - val_accuracy: 0.9139 - 9s/epoch - 12ms/step Epoch 8/20 702/702 - 9s - loss: 0.0569 - accuracy: 0.9803 - val_loss: 0.1987 - val_accuracy: 0.9443 - 9s/epoch - 12ms/step Epoch 9/20 702/702 - 9s - loss: 0.0521 - accuracy: 0.9830 - val_loss: 0.0780 - val_accuracy: 0.9796 - 9s/epoch - 12ms/step Epoch 10/20 702/702 - 9s - loss: 0.0511 - accuracy: 0.9820 - val_loss: 0.1176 - val_accuracy: 0.9675 - 9s/epoch - 12ms/step Epoch 11/20 702/702 - 9s - loss: 0.0512 - accuracy: 0.9821 - val_loss: 0.0538 - val_accuracy: 0.9860 - 9s/epoch - 12ms/step Epoch 12/20 702/702 - 9s - loss: 0.0492 - accuracy: 0.9833 - val_loss: 0.1906 - val_accuracy: 0.9471 - 9s/epoch - 12ms/step Epoch 13/20 702/702 - 9s - loss: 0.0489 - accuracy: 0.9828 - val_loss: 0.2286 - val_accuracy: 0.9331 - 9s/epoch - 12ms/step Epoch 14/20 702/702 - 9s - loss: 0.0499 - accuracy: 0.9825 - val_loss: 0.2696 - val_accuracy: 0.9283 - 9s/epoch - 12ms/step Epoch 15/20 702/702 - 9s - loss: 0.0447 - accuracy: 0.9848 - val_loss: 0.1431 - val_accuracy: 0.9611 - 9s/epoch - 12ms/step Epoch 16/20 702/702 - 9s - loss: 0.0436 - accuracy: 0.9859 - val_loss: 0.1307 - val_accuracy: 0.9667 - 9s/epoch - 12ms/step Epoch 17/20 702/702 - 9s - loss: 0.0394 - accuracy: 0.9883 - val_loss: 0.5504 - val_accuracy: 0.8317 - 9s/epoch - 12ms/step Epoch 18/20 702/702 - 9s - loss: 0.0425 - accuracy: 0.9849 - val_loss: 0.1382 - val_accuracy: 0.9623 - 9s/epoch - 12ms/step Epoch 19/20 702/702 - 9s - loss: 0.0391 - accuracy: 0.9868 - val_loss: 0.0531 - val_accuracy: 0.9880 - 9s/epoch - 12ms/step Epoch 20/20 702/702 - 9s - loss: 0.0431 - accuracy: 0.9851 - val_loss: 0.1894 - val_accuracy: 0.9475 - 9s/epoch - 12ms/step Epoch 1/20 702/702 - 9s - loss: 0.0358 - accuracy: 0.9877 - val_loss: 0.1691 - val_accuracy: 0.9607 - 9s/epoch - 12ms/step Epoch 2/20 702/702 - 9s - loss: 0.0377 - accuracy: 0.9868 - val_loss: 0.1216 - val_accuracy: 0.9704 - 9s/epoch - 12ms/step Epoch 3/20 702/702 - 9s - loss: 0.0378 - accuracy: 0.9866 - val_loss: 0.2642 - val_accuracy: 0.9315 - 9s/epoch - 12ms/step Epoch 4/20 702/702 - 9s - loss: 0.0349 - accuracy: 0.9878 - val_loss: 0.1824 - val_accuracy: 0.9539 - 9s/epoch - 12ms/step Epoch 5/20 702/702 - 9s - loss: 0.0330 - accuracy: 0.9885 - val_loss: 0.1509 - val_accuracy: 0.9611 - 9s/epoch - 12ms/step Epoch 6/20 702/702 - 9s - loss: 0.0343 - accuracy: 0.9890 - val_loss: 0.0885 - val_accuracy: 0.9820 - 9s/epoch - 12ms/step Epoch 7/20 702/702 - 9s - loss: 0.0314 - accuracy: 0.9895 - val_loss: 0.2497 - val_accuracy: 0.9267 - 9s/epoch - 12ms/step Epoch 8/20 702/702 - 9s - loss: 0.0319 - accuracy: 0.9894 - val_loss: 0.2601 - val_accuracy: 0.9347 - 9s/epoch - 12ms/step Epoch 9/20 702/702 - 9s - loss: 0.0332 - accuracy: 0.9893 - val_loss: 0.1391 - val_accuracy: 0.9708 - 9s/epoch - 12ms/step Epoch 10/20 702/702 - 9s - loss: 0.0302 - accuracy: 0.9899 - val_loss: 0.1098 - val_accuracy: 0.9776 - 9s/epoch - 12ms/step Epoch 11/20 702/702 - 9s - loss: 0.0296 - accuracy: 0.9903 - val_loss: 0.1419 - val_accuracy: 0.9708 - 9s/epoch - 12ms/step Epoch 12/20 702/702 - 9s - loss: 0.0296 - accuracy: 0.9903 - val_loss: 0.2518 - val_accuracy: 0.9319 - 9s/epoch - 12ms/step Epoch 13/20 702/702 - 9s - loss: 0.0275 - accuracy: 0.9905 - val_loss: 0.1514 - val_accuracy: 0.9631 - 9s/epoch - 12ms/step Epoch 14/20 702/702 - 9s - loss: 0.0278 - accuracy: 0.9905 - val_loss: 0.2465 - val_accuracy: 0.9435 - 9s/epoch - 12ms/step Epoch 15/20 702/702 - 9s - loss: 0.0267 - accuracy: 0.9908 - val_loss: 0.1607 - val_accuracy: 0.9671 - 9s/epoch - 12ms/step Epoch 16/20 702/702 - 9s - loss: 0.0269 - accuracy: 0.9911 - val_loss: 0.1185 - val_accuracy: 0.9724 - 9s/epoch - 12ms/step Epoch 17/20 702/702 - 9s - loss: 0.0267 - accuracy: 0.9915 - val_loss: 0.4975 - val_accuracy: 0.8742 - 9s/epoch - 12ms/step Epoch 18/20 702/702 - 9s - loss: 0.0250 - accuracy: 0.9920 - val_loss: 0.2620 - val_accuracy: 0.9323 - 9s/epoch - 12ms/step Epoch 19/20 702/702 - 9s - loss: 0.0234 - accuracy: 0.9926 - val_loss: 0.1395 - val_accuracy: 0.9712 - 9s/epoch - 12ms/step Epoch 20/20 702/702 - 9s - loss: 0.0235 - accuracy: 0.9917 - val_loss: 0.1672 - val_accuracy: 0.9692 - 9s/epoch - 12ms/step Epoch 1/20 702/702 - 9s - loss: 0.0237 - accuracy: 0.9920 - val_loss: 0.1391 - val_accuracy: 0.9692 - 9s/epoch - 12ms/step Epoch 2/20 702/702 - 9s - loss: 0.0223 - accuracy: 0.9928 - val_loss: 0.1689 - val_accuracy: 0.9631 - 9s/epoch - 12ms/step Epoch 3/20 702/702 - 9s - loss: 0.0234 - accuracy: 0.9924 - val_loss: 0.2489 - val_accuracy: 0.9419 - 9s/epoch - 12ms/step Epoch 4/20 702/702 - 9s - loss: 0.0213 - accuracy: 0.9932 - val_loss: 0.3743 - val_accuracy: 0.9099 - 9s/epoch - 12ms/step Epoch 5/20 702/702 - 9s - loss: 0.0211 - accuracy: 0.9930 - val_loss: 0.2020 - val_accuracy: 0.9571 - 9s/epoch - 12ms/step Epoch 6/20 702/702 - 9s - loss: 0.0220 - accuracy: 0.9927 - val_loss: 0.1627 - val_accuracy: 0.9631 - 9s/epoch - 12ms/step Epoch 7/20 702/702 - 9s - loss: 0.0182 - accuracy: 0.9941 - val_loss: 0.2523 - val_accuracy: 0.9407 - 9s/epoch - 12ms/step Epoch 8/20 702/702 - 9s - loss: 0.0198 - accuracy: 0.9935 - val_loss: 0.1814 - val_accuracy: 0.9611 - 9s/epoch - 12ms/step Epoch 9/20 702/702 - 9s - loss: 0.0209 - accuracy: 0.9929 - val_loss: 0.1339 - val_accuracy: 0.9728 - 9s/epoch - 12ms/step Epoch 10/20 702/702 - 9s - loss: 0.0187 - accuracy: 0.9932 - val_loss: 0.1793 - val_accuracy: 0.9623 - 9s/epoch - 12ms/step Epoch 11/20 702/702 - 9s - loss: 0.0198 - accuracy: 0.9936 - val_loss: 0.1357 - val_accuracy: 0.9752 - 9s/epoch - 12ms/step Epoch 12/20 702/702 - 9s - loss: 0.0208 - accuracy: 0.9930 - val_loss: 0.3406 - val_accuracy: 0.9255 - 9s/epoch - 12ms/step Epoch 13/20 702/702 - 9s - loss: 0.0221 - accuracy: 0.9927 - val_loss: 0.2493 - val_accuracy: 0.9443 - 9s/epoch - 12ms/step Epoch 14/20 702/702 - 9s - loss: 0.0200 - accuracy: 0.9938 - val_loss: 0.2690 - val_accuracy: 0.9415 - 9s/epoch - 12ms/step Epoch 15/20 702/702 - 9s - loss: 0.0182 - accuracy: 0.9944 - val_loss: 0.1616 - val_accuracy: 0.9671 - 9s/epoch - 12ms/step Epoch 16/20 702/702 - 9s - loss: 0.0167 - accuracy: 0.9940 - val_loss: 0.1954 - val_accuracy: 0.9619 - 9s/epoch - 12ms/step Epoch 17/20 702/702 - 9s - loss: 0.0176 - accuracy: 0.9950 - val_loss: 0.4086 - val_accuracy: 0.9147 - 9s/epoch - 12ms/step Epoch 18/20 702/702 - 9s - loss: 0.0172 - accuracy: 0.9948 - val_loss: 0.2906 - val_accuracy: 0.9359 - 9s/epoch - 12ms/step Epoch 19/20 702/702 - 9s - loss: 0.0205 - accuracy: 0.9932 - val_loss: 0.1849 - val_accuracy: 0.9647 - 9s/epoch - 12ms/step Epoch 20/20 702/702 - 9s - loss: 0.0151 - accuracy: 0.9954 - val_loss: 0.1367 - val_accuracy: 0.9752 - 9s/epoch - 12ms/step
df = pd.concat([df_accuracy, df_val_accuracy], axis=1)
df1 = df.copy()
df1.index.name='epoch'
df1.columns =['accuracy', 'val_accuracy']
df1.reset_index(inplace=True)
df1 = df1.assign(Patience=" ")
# iterate through each row and select
pat=2
i=0
df1.at[i,'Patience']=pat
for i in range(len(df1)):
if df1.iat[i,0] == 0 and i==0:
df1.at[i,'Patience']=pat
elif df1.iat[i,0] == 0 and i!=0:
pat=pat+1
df1.at[i,'Patience']=pat
else:
df1.at[i,'Patience']=pat
#pat=pat+1
i=i+1
fig, ax =plt.subplots(1,2)
sns.lineplot('epoch', 'accuracy', data=df1, hue="Patience",style="Patience", palette="tab10", linewidth=2.5, ax=ax[0]).set(title='VGG16 Model: Training Accuracy')
sns.lineplot('epoch', 'val_accuracy', data=df1, hue="Patience",style="Patience", palette="tab10", linewidth=2.5, ax=ax[1]).set(title='VGG16 Model: Validation Accuracy')
fig.set_size_inches( 16, 10)
fig.show()
# Accuracy For top Model
accuracy4 = model4.evaluate(test_images, test_labels, verbose=1)
print('\n', 'Test_Accuracy:-', accuracy4[1])
82/82 [==============================] - 1s 12ms/step - loss: 0.2784 - accuracy: 0.9446 Test_Accuracy:- 0.944615364074707
# Evaluating the model on test data
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
pred4 = model4.predict(test_images)
pred4 = np.argmax(pred4,axis = 1)
y_true4 = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true4,pred4))
#Plotting the heatmap using confusion matrix
cm4 = confusion_matrix(y_true4,pred4)
plt.figure(figsize=(8,5))
sns.heatmap(cm4, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.95 0.94 0.94 1300
1 0.94 0.95 0.94 1300
accuracy 0.94 2600
macro avg 0.94 0.94 0.94 2600
weighted avg 0.94 0.94 0.94 2600
Observations (thus far): Model 4
#Clearing backend
from tensorflow.keras import backend
backend.clear_session()
#Fixing the seed for random number generators
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
def build_model(optimizer):
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import Model
vgg = VGG16(include_top=False, weights='imagenet', input_shape=(64,64,3))
transfer_layer = vgg.get_layer('block5_pool')
vgg.trainable=False
# Add classification layers on top of it
x = Flatten()(transfer_layer.output) #Flatten the output from the 5th block of the VGG16 model
x = Dense(256, activation='relu')(x)
# Similarly add a dense layer with 128 neurons
x = Dense(128, activation='relu')(x)
x = Dropout(0.3)(x)
# Add a dense layer with 64 neurons
x = Dense(64, activation='relu')(x)
x = BatchNormalization()(x)
pred = Dense(2, activation='softmax')(x)
model4 = Model(vgg.input, pred) #Initializing the model
model4.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model4
Reference: Some Optimizer code came from https://www.analyticsvidhya.com/blog/2021/10/a-comprehensive-guide-on-deep-learning-optimizers/#:~:text=An%20optimizer%20is%20a%20function,loss%20and%20improve%20the%20accuracy
#Loop Through Optimizers
df_optimizer_comp = pd.DataFrame(columns=['Optimizer', 'Accuracy', 'Loss', 'Val_accuracy', 'Val_Loss'])
optimizer = ['Adadelta', 'Adagrad', 'Adam', 'RMSprop', 'SGD', tf.keras.optimizers.Adamax(learning_rate = 0.0005)]
for i in optimizer:
model4 = build_model(i)
#Fitting the model and running the model for 10 epochs
epochs=10
history4 = model4.fit(
train_images, train_labels,
epochs=epochs,
batch_size=32,
validation_split=0.1,
verbose=2
)
temp_df = pd.DataFrame(
{
'Optimizer': i,
#'Epoch': history4.history['Epoch']
'Accuracy': history4.history['accuracy'],
'Loss': history4.history['loss'],
'Val_accuracy': history4.history['val_accuracy'],
'Val_Loss': history4.history['loss']
}
)
df_optimizer_comp = df_optimizer_comp.append(temp_df, ignore_index=True)
# Delete multiple columns from the dataframe
temp_df = temp_df.drop(['Optimizer', 'Accuracy', 'Loss', 'Val_accuracy', 'Val_Loss'], axis=1)
Epoch 1/10 702/702 - 10s - loss: 0.8024 - accuracy: 0.5907 - val_loss: 0.8551 - val_accuracy: 0.4014 - 10s/epoch - 14ms/step Epoch 2/10 702/702 - 9s - loss: 0.6743 - accuracy: 0.6637 - val_loss: 0.7535 - val_accuracy: 0.5581 - 9s/epoch - 12ms/step Epoch 3/10 702/702 - 9s - loss: 0.6076 - accuracy: 0.7025 - val_loss: 0.6573 - val_accuracy: 0.6322 - 9s/epoch - 12ms/step Epoch 4/10 702/702 - 9s - loss: 0.5548 - accuracy: 0.7303 - val_loss: 0.5895 - val_accuracy: 0.6935 - 9s/epoch - 12ms/step Epoch 5/10 702/702 - 9s - loss: 0.5115 - accuracy: 0.7556 - val_loss: 0.5214 - val_accuracy: 0.7380 - 9s/epoch - 12ms/step Epoch 6/10 702/702 - 9s - loss: 0.4805 - accuracy: 0.7742 - val_loss: 0.4821 - val_accuracy: 0.7652 - 9s/epoch - 12ms/step Epoch 7/10 702/702 - 9s - loss: 0.4560 - accuracy: 0.7898 - val_loss: 0.4276 - val_accuracy: 0.8037 - 9s/epoch - 13ms/step Epoch 8/10 702/702 - 9s - loss: 0.4361 - accuracy: 0.7976 - val_loss: 0.4030 - val_accuracy: 0.8157 - 9s/epoch - 12ms/step Epoch 9/10 702/702 - 9s - loss: 0.4109 - accuracy: 0.8137 - val_loss: 0.3865 - val_accuracy: 0.8249 - 9s/epoch - 12ms/step Epoch 10/10 702/702 - 9s - loss: 0.4000 - accuracy: 0.8195 - val_loss: 0.3639 - val_accuracy: 0.8333 - 9s/epoch - 12ms/step Epoch 1/10 702/702 - 10s - loss: 0.3623 - accuracy: 0.8410 - val_loss: 0.2867 - val_accuracy: 0.8706 - 10s/epoch - 14ms/step Epoch 2/10 702/702 - 9s - loss: 0.2364 - accuracy: 0.9084 - val_loss: 0.3435 - val_accuracy: 0.8454 - 9s/epoch - 12ms/step Epoch 3/10 702/702 - 9s - loss: 0.2121 - accuracy: 0.9204 - val_loss: 0.1685 - val_accuracy: 0.9271 - 9s/epoch - 12ms/step Epoch 4/10 702/702 - 9s - loss: 0.1986 - accuracy: 0.9257 - val_loss: 0.1691 - val_accuracy: 0.9311 - 9s/epoch - 12ms/step Epoch 5/10 702/702 - 9s - loss: 0.1858 - accuracy: 0.9314 - val_loss: 0.1662 - val_accuracy: 0.9347 - 9s/epoch - 12ms/step Epoch 6/10 702/702 - 9s - loss: 0.1784 - accuracy: 0.9329 - val_loss: 0.1729 - val_accuracy: 0.9319 - 9s/epoch - 12ms/step Epoch 7/10 702/702 - 9s - loss: 0.1742 - accuracy: 0.9358 - val_loss: 0.2111 - val_accuracy: 0.9091 - 9s/epoch - 12ms/step Epoch 8/10 702/702 - 9s - loss: 0.1698 - accuracy: 0.9363 - val_loss: 0.1533 - val_accuracy: 0.9391 - 9s/epoch - 12ms/step Epoch 9/10 702/702 - 9s - loss: 0.1618 - accuracy: 0.9419 - val_loss: 0.1404 - val_accuracy: 0.9447 - 9s/epoch - 12ms/step Epoch 10/10 702/702 - 9s - loss: 0.1568 - accuracy: 0.9430 - val_loss: 0.1169 - val_accuracy: 0.9567 - 9s/epoch - 12ms/step Epoch 1/10 702/702 - 10s - loss: 0.2427 - accuracy: 0.9003 - val_loss: 0.1395 - val_accuracy: 0.9399 - 10s/epoch - 15ms/step Epoch 2/10 702/702 - 9s - loss: 0.1717 - accuracy: 0.9358 - val_loss: 0.1397 - val_accuracy: 0.9555 - 9s/epoch - 12ms/step Epoch 3/10 702/702 - 9s - loss: 0.1564 - accuracy: 0.9417 - val_loss: 0.1256 - val_accuracy: 0.9567 - 9s/epoch - 12ms/step Epoch 4/10 702/702 - 9s - loss: 0.1499 - accuracy: 0.9435 - val_loss: 0.1403 - val_accuracy: 0.9575 - 9s/epoch - 12ms/step Epoch 5/10 702/702 - 9s - loss: 0.1412 - accuracy: 0.9476 - val_loss: 0.0888 - val_accuracy: 0.9828 - 9s/epoch - 12ms/step Epoch 6/10 702/702 - 9s - loss: 0.1349 - accuracy: 0.9492 - val_loss: 0.1568 - val_accuracy: 0.9415 - 9s/epoch - 12ms/step Epoch 7/10 702/702 - 9s - loss: 0.1298 - accuracy: 0.9521 - val_loss: 0.1698 - val_accuracy: 0.9303 - 9s/epoch - 12ms/step Epoch 8/10 702/702 - 9s - loss: 0.1274 - accuracy: 0.9525 - val_loss: 0.2571 - val_accuracy: 0.9075 - 9s/epoch - 12ms/step Epoch 9/10 702/702 - 9s - loss: 0.1220 - accuracy: 0.9547 - val_loss: 0.0519 - val_accuracy: 0.9824 - 9s/epoch - 12ms/step Epoch 10/10 702/702 - 9s - loss: 0.1176 - accuracy: 0.9579 - val_loss: 0.0777 - val_accuracy: 0.9768 - 9s/epoch - 13ms/step Epoch 1/10 702/702 - 10s - loss: 0.2395 - accuracy: 0.9061 - val_loss: 0.0631 - val_accuracy: 0.9772 - 10s/epoch - 15ms/step Epoch 2/10 702/702 - 9s - loss: 0.1723 - accuracy: 0.9354 - val_loss: 0.2093 - val_accuracy: 0.9135 - 9s/epoch - 13ms/step Epoch 3/10 702/702 - 9s - loss: 0.1578 - accuracy: 0.9404 - val_loss: 0.2666 - val_accuracy: 0.8962 - 9s/epoch - 13ms/step Epoch 4/10 702/702 - 9s - loss: 0.1496 - accuracy: 0.9444 - val_loss: 0.0403 - val_accuracy: 0.9892 - 9s/epoch - 13ms/step Epoch 5/10 702/702 - 9s - loss: 0.1433 - accuracy: 0.9466 - val_loss: 0.0534 - val_accuracy: 0.9888 - 9s/epoch - 13ms/step Epoch 6/10 702/702 - 9s - loss: 0.1414 - accuracy: 0.9485 - val_loss: 0.1851 - val_accuracy: 0.9331 - 9s/epoch - 13ms/step Epoch 7/10 702/702 - 9s - loss: 0.1366 - accuracy: 0.9505 - val_loss: 0.2318 - val_accuracy: 0.9187 - 9s/epoch - 13ms/step Epoch 8/10 702/702 - 9s - loss: 0.1337 - accuracy: 0.9511 - val_loss: 0.1093 - val_accuracy: 0.9667 - 9s/epoch - 13ms/step Epoch 9/10 702/702 - 9s - loss: 0.1282 - accuracy: 0.9529 - val_loss: 0.0359 - val_accuracy: 0.9900 - 9s/epoch - 13ms/step Epoch 10/10 702/702 - 9s - loss: 0.1267 - accuracy: 0.9553 - val_loss: 0.0850 - val_accuracy: 0.9784 - 9s/epoch - 13ms/step Epoch 1/10 702/702 - 10s - loss: 0.3086 - accuracy: 0.8674 - val_loss: 0.0746 - val_accuracy: 0.9724 - 10s/epoch - 14ms/step Epoch 2/10 702/702 - 9s - loss: 0.2156 - accuracy: 0.9153 - val_loss: 1.0750 - val_accuracy: 0.3546 - 9s/epoch - 12ms/step Epoch 3/10 702/702 - 9s - loss: 0.1928 - accuracy: 0.9257 - val_loss: 0.0797 - val_accuracy: 0.9736 - 9s/epoch - 12ms/step Epoch 4/10 702/702 - 9s - loss: 0.1803 - accuracy: 0.9303 - val_loss: 0.0238 - val_accuracy: 0.9924 - 9s/epoch - 12ms/step Epoch 5/10 702/702 - 9s - loss: 0.1710 - accuracy: 0.9351 - val_loss: 0.0275 - val_accuracy: 0.9920 - 9s/epoch - 12ms/step Epoch 6/10 702/702 - 9s - loss: 0.1703 - accuracy: 0.9338 - val_loss: 0.1759 - val_accuracy: 0.9275 - 9s/epoch - 12ms/step Epoch 7/10 702/702 - 9s - loss: 0.1654 - accuracy: 0.9368 - val_loss: 0.7003 - val_accuracy: 0.6478 - 9s/epoch - 12ms/step Epoch 8/10 702/702 - 9s - loss: 0.1629 - accuracy: 0.9379 - val_loss: 0.1171 - val_accuracy: 0.9587 - 9s/epoch - 12ms/step Epoch 9/10 702/702 - 9s - loss: 0.1542 - accuracy: 0.9422 - val_loss: 0.0340 - val_accuracy: 0.9916 - 9s/epoch - 12ms/step Epoch 10/10 702/702 - 9s - loss: 0.1538 - accuracy: 0.9430 - val_loss: 0.0160 - val_accuracy: 0.9960 - 9s/epoch - 12ms/step Epoch 1/10 702/702 - 10s - loss: 0.2607 - accuracy: 0.8932 - val_loss: 0.1492 - val_accuracy: 0.9375 - 10s/epoch - 14ms/step Epoch 2/10 702/702 - 9s - loss: 0.1838 - accuracy: 0.9321 - val_loss: 0.1895 - val_accuracy: 0.9243 - 9s/epoch - 12ms/step Epoch 3/10 702/702 - 9s - loss: 0.1681 - accuracy: 0.9370 - val_loss: 0.1008 - val_accuracy: 0.9679 - 9s/epoch - 12ms/step Epoch 4/10 702/702 - 9s - loss: 0.1592 - accuracy: 0.9406 - val_loss: 0.4218 - val_accuracy: 0.8181 - 9s/epoch - 12ms/step Epoch 5/10 702/702 - 9s - loss: 0.1503 - accuracy: 0.9454 - val_loss: 0.2060 - val_accuracy: 0.9267 - 9s/epoch - 12ms/step Epoch 6/10 702/702 - 9s - loss: 0.1464 - accuracy: 0.9456 - val_loss: 0.0618 - val_accuracy: 0.9824 - 9s/epoch - 12ms/step Epoch 7/10 702/702 - 9s - loss: 0.1381 - accuracy: 0.9483 - val_loss: 0.3310 - val_accuracy: 0.8494 - 9s/epoch - 12ms/step Epoch 8/10 702/702 - 9s - loss: 0.1347 - accuracy: 0.9519 - val_loss: 0.2108 - val_accuracy: 0.9175 - 9s/epoch - 12ms/step Epoch 9/10 702/702 - 9s - loss: 0.1311 - accuracy: 0.9532 - val_loss: 0.0634 - val_accuracy: 0.9796 - 9s/epoch - 12ms/step Epoch 10/10 702/702 - 9s - loss: 0.1278 - accuracy: 0.9555 - val_loss: 0.0828 - val_accuracy: 0.9756 - 9s/epoch - 12ms/step
df_optimizer_comp.insert(loc=5,
column='Epoch',
value=0)
# iterate through each row
Epoch = 0
i=0
for i in range(len(df_optimizer_comp)-1):
if df_optimizer_comp.iat[i,0] == df_optimizer_comp.iat[(i+1),0]:
Epoch = Epoch +1
df_optimizer_comp.at[(i+1),"Epoch"]=Epoch
else:
Epoch = 0
df_optimizer_comp.at[(i+1),"Epoch"]=Epoch
df_optimizer_comp
| Optimizer | Accuracy | Loss | Val_accuracy | Val_Loss | Epoch | |
|---|---|---|---|---|---|---|
| 0 | Adadelta | 0.590731 | 0.802437 | 0.401442 | 0.802437 | 0 |
| 1 | Adadelta | 0.663654 | 0.674262 | 0.558093 | 0.674262 | 1 |
| 2 | Adadelta | 0.702520 | 0.607563 | 0.632212 | 0.607563 | 2 |
| 3 | Adadelta | 0.730256 | 0.554809 | 0.693510 | 0.554809 | 3 |
| 4 | Adadelta | 0.755587 | 0.511539 | 0.737981 | 0.511539 | 4 |
| 5 | Adadelta | 0.774196 | 0.480493 | 0.765224 | 0.480493 | 5 |
| 6 | Adadelta | 0.789778 | 0.455977 | 0.803686 | 0.455977 | 6 |
| 7 | Adadelta | 0.797614 | 0.436052 | 0.815705 | 0.436052 | 7 |
| 8 | Adadelta | 0.813730 | 0.410878 | 0.824920 | 0.410878 | 8 |
| 9 | Adadelta | 0.819517 | 0.400044 | 0.833333 | 0.400044 | 9 |
| 10 | Adagrad | 0.840976 | 0.362262 | 0.870593 | 0.362262 | 0 |
| 11 | Adagrad | 0.908379 | 0.236364 | 0.845353 | 0.236364 | 1 |
| 12 | Adagrad | 0.920443 | 0.212059 | 0.927083 | 0.212059 | 2 |
| 13 | Adagrad | 0.925741 | 0.198625 | 0.931090 | 0.198625 | 3 |
| 14 | Adagrad | 0.931351 | 0.185805 | 0.934696 | 0.185805 | 4 |
| 15 | Adagrad | 0.932864 | 0.178398 | 0.931891 | 0.178398 | 5 |
| 16 | Adagrad | 0.935847 | 0.174200 | 0.909054 | 0.174200 | 6 |
| 17 | Adagrad | 0.936292 | 0.169840 | 0.939103 | 0.169840 | 7 |
| 18 | Adagrad | 0.941857 | 0.161771 | 0.944712 | 0.161771 | 8 |
| 19 | Adagrad | 0.943015 | 0.156769 | 0.956731 | 0.156769 | 9 |
| 20 | Adam | 0.900321 | 0.242717 | 0.939904 | 0.242717 | 0 |
| 21 | Adam | 0.935758 | 0.171709 | 0.955529 | 0.171709 | 1 |
| 22 | Adam | 0.941724 | 0.156410 | 0.956731 | 0.156410 | 2 |
| 23 | Adam | 0.943460 | 0.149872 | 0.957532 | 0.149872 | 3 |
| 24 | Adam | 0.947645 | 0.141155 | 0.982772 | 0.141155 | 4 |
| 25 | Adam | 0.949248 | 0.134932 | 0.941506 | 0.134932 | 5 |
| 26 | Adam | 0.952097 | 0.129813 | 0.930288 | 0.129813 | 6 |
| 27 | Adam | 0.952498 | 0.127392 | 0.907452 | 0.127392 | 7 |
| 28 | Adam | 0.954724 | 0.121959 | 0.982372 | 0.121959 | 8 |
| 29 | Adam | 0.957884 | 0.117635 | 0.976763 | 0.117635 | 9 |
| 30 | RMSprop | 0.906064 | 0.239500 | 0.977163 | 0.239500 | 0 |
| 31 | RMSprop | 0.935358 | 0.172256 | 0.913462 | 0.172256 | 1 |
| 32 | RMSprop | 0.940433 | 0.157821 | 0.896234 | 0.157821 | 2 |
| 33 | RMSprop | 0.944350 | 0.149554 | 0.989183 | 0.149554 | 3 |
| 34 | RMSprop | 0.946576 | 0.143270 | 0.988782 | 0.143270 | 4 |
| 35 | RMSprop | 0.948491 | 0.141421 | 0.933093 | 0.141421 | 5 |
| 36 | RMSprop | 0.950494 | 0.136573 | 0.918670 | 0.136573 | 6 |
| 37 | RMSprop | 0.951073 | 0.133733 | 0.966747 | 0.133733 | 7 |
| 38 | RMSprop | 0.952943 | 0.128206 | 0.989984 | 0.128206 | 8 |
| 39 | RMSprop | 0.955258 | 0.126666 | 0.978365 | 0.126666 | 9 |
| 40 | SGD | 0.867421 | 0.308649 | 0.972356 | 0.308649 | 0 |
| 41 | SGD | 0.915324 | 0.215590 | 0.354567 | 0.215590 | 1 |
| 42 | SGD | 0.925697 | 0.192769 | 0.973558 | 0.192769 | 2 |
| 43 | SGD | 0.930327 | 0.180309 | 0.992388 | 0.180309 | 3 |
| 44 | SGD | 0.935090 | 0.170956 | 0.991987 | 0.170956 | 4 |
| 45 | SGD | 0.933844 | 0.170304 | 0.927484 | 0.170304 | 5 |
| 46 | SGD | 0.936782 | 0.165404 | 0.647837 | 0.165404 | 6 |
| 47 | SGD | 0.937851 | 0.162903 | 0.958734 | 0.162903 | 7 |
| 48 | SGD | 0.942214 | 0.154237 | 0.991587 | 0.154237 | 8 |
| 49 | SGD | 0.942970 | 0.153804 | 0.995994 | 0.153804 | 9 |
| 50 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.893197 | 0.260666 | 0.937500 | 0.260666 | 0 |
| 51 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.932063 | 0.183792 | 0.924279 | 0.183792 | 1 |
| 52 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.937005 | 0.168109 | 0.967949 | 0.168109 | 2 |
| 53 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.940566 | 0.159211 | 0.818109 | 0.159211 | 3 |
| 54 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.945419 | 0.150304 | 0.926683 | 0.150304 | 4 |
| 55 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.945642 | 0.146394 | 0.982372 | 0.146394 | 5 |
| 56 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.948313 | 0.138058 | 0.849359 | 0.138058 | 6 |
| 57 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.951874 | 0.134738 | 0.917468 | 0.134738 | 7 |
| 58 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.953210 | 0.131061 | 0.979567 | 0.131061 | 8 |
| 59 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.955525 | 0.127829 | 0.975561 | 0.127829 | 9 |
fig, ax =plt.subplots(1,2)
sns.lineplot('Epoch', 'Accuracy', data=df_optimizer_comp, hue="Optimizer",style="Optimizer", palette="tab10", linewidth=2.5, ax=ax[0]).set(title='VGG16 Model: Training Accuracy')
sns.lineplot('Epoch', 'Val_accuracy', data=df_optimizer_comp, hue="Optimizer",style="Optimizer", palette="tab10", linewidth=2.5, ax=ax[1]).set(title='VGG16 Model: Validation Accuracy')
plt.yticks(np.arange(0.50, 1.00, 0.05))
fig.set_size_inches(16,10)
fig.show()
accuracy4 = model4.evaluate(test_images, test_labels, verbose=1)
print('\n', 'Test_Accuracy:-', accuracy4[1])
82/82 [==============================] - 1s 12ms/step - loss: 0.1173 - accuracy: 0.9542 Test_Accuracy:- 0.954230785369873
# Evaluating the model on test data
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
pred4 = model4.predict(test_images)
pred4 = np.argmax(pred4,axis = 1)
y_true4 = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true4,pred4))
#Plotting the heatmap using confusion matrix
cm4 = confusion_matrix(y_true4,pred4)
plt.figure(figsize=(8,5))
sns.heatmap(cm4, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.95 0.96 0.95 1300
1 0.96 0.95 0.95 1300
accuracy 0.95 2600
macro avg 0.95 0.95 0.95 2600
weighted avg 0.95 0.95 0.95 2600
Observations: Model 4
| Test Accuracy | False negative out of 1,300 |
False Positive out of 1,300 |
|
|---|---|---|---|
| Base | 0.97 | 41 | 28 |
| Model 1 | 0.95 | 65 | 61 |
| Model 2 | 0.98 | 22 | 33 |
| Model 3 | 0.98 | 23 | 25 |
| Model 4 | 0.95 | 65 | 54 |
Reference: VGG16 code from https://keras.io/api/applications/vgg/
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_images, train_labels, test_size=0.2, random_state=42)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Using ImageDataGenerator to generate images
# Add Noise
def add_noise(img):
VARIABILITY = 0.1 # customize this
deviation = VARIABILITY*np.random.random()
noise = np.random.normal(0, deviation, img.shape)
img += noise
#img = np.clip(img, 0., 255.)
return img
datagen_aug = ImageDataGenerator(
#rescale=1./255,
# customize these and other parameters
rotation_range=45,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0,
zoom_range=0,
#preprocessing_function=add_noise,
horizontal_flip=True,
fill_mode='nearest',
)
val_datagen = ImageDataGenerator()
# Flowing training images using train_datagen generator
train_generator = datagen_aug.flow(x = X_train, y = y_train, batch_size=64, seed=42, shuffle=True)
# Flowing validation images using val_datagen generator
val_generator = val_datagen.flow(x= X_val, y = y_val, batch_size=64, seed=42, shuffle=True)
#Creating an iterable for images and labels from the training data
images, labels = next(train_generator)
#Plotting 16 images from the training data
fig, axes = plt.subplots(4, 4, figsize = (16, 8))
fig.set_size_inches(16, 16)
for (image, label, ax) in zip(images, labels, axes.flatten()):
ax.imshow(image)
if label[1] == 1:
ax.set_title('parasitized')
else:
ax.set_title('uninfected')
ax.axis('off')
#Clearing backend
from tensorflow.keras import backend
backend.clear_session()
#Fixing the seed for random number generators
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
def build_model(optimizer):
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import Model
vgg = VGG16(include_top=False, weights='imagenet', input_shape=(64,64,3))
transfer_layer = vgg.get_layer('block5_pool')
vgg.trainable=False
# Add classification layers on top of it
x = Flatten()(transfer_layer.output) #Flatten the output from the 5th block of the VGG16 model
x = Dense(256, activation='relu')(x)
# Similarly add a dense layer with 128 neurons
x = Dense(128, activation='relu')(x)
x = Dropout(0.3)(x)
# Add a dense layer with 64 neurons
x = Dense(64, activation='relu')(x)
x = BatchNormalization()(x)
pred = Dense(2, activation='softmax')(x)
model5 = Model(vgg.input, pred) #Initializing the model
model5.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model5
#Loop Through Optimizers
df_optimizer_comp = pd.DataFrame(columns=['Optimizer', 'Accuracy', 'Loss', 'Val_accuracy', 'Val_Loss'])
optimizer = ['Adadelta', 'Adagrad', 'Adam', 'RMSprop', 'SGD', tf.keras.optimizers.Adamax(learning_rate = 0.0005)]
for i in optimizer:
model5 = build_model(i)
#Fitting the model and running the model for 10 epochs
epochs=10
history5 = model5.fit(
train_generator, validation_data=val_generator,
epochs=epochs,
batch_size=32,
#validation_split=0.1,
verbose=2
)
temp_df = pd.DataFrame(
{
'Optimizer': i,
#'Epoch': history5.history['Epoch']
'Accuracy': history5.history['accuracy'],
'Loss': history5.history['loss'],
'Val_accuracy': history5.history['val_accuracy'],
'Val_Loss': history5.history['loss']
}
)
df_optimizer_comp = df_optimizer_comp.append(temp_df, ignore_index=True)
# Delete multiple columns from the dataframe
temp_df = temp_df.drop(['Optimizer', 'Accuracy', 'Loss', 'Val_accuracy', 'Val_Loss'], axis=1)
Epoch 1/10 312/312 - 23s - loss: 0.8810 - accuracy: 0.5434 - val_loss: 0.6100 - val_accuracy: 0.6284 - 23s/epoch - 74ms/step Epoch 2/10 312/312 - 21s - loss: 0.8173 - accuracy: 0.5749 - val_loss: 0.5855 - val_accuracy: 0.6526 - 21s/epoch - 69ms/step Epoch 3/10 312/312 - 22s - loss: 0.7608 - accuracy: 0.6068 - val_loss: 0.5492 - val_accuracy: 0.7063 - 22s/epoch - 70ms/step Epoch 4/10 312/312 - 21s - loss: 0.7322 - accuracy: 0.6234 - val_loss: 0.5187 - val_accuracy: 0.7504 - 21s/epoch - 68ms/step Epoch 5/10 312/312 - 21s - loss: 0.7075 - accuracy: 0.6401 - val_loss: 0.4927 - val_accuracy: 0.7804 - 21s/epoch - 67ms/step Epoch 6/10 312/312 - 21s - loss: 0.6777 - accuracy: 0.6569 - val_loss: 0.4765 - val_accuracy: 0.7995 - 21s/epoch - 67ms/step Epoch 7/10 312/312 - 21s - loss: 0.6547 - accuracy: 0.6727 - val_loss: 0.4634 - val_accuracy: 0.8093 - 21s/epoch - 68ms/step Epoch 8/10 312/312 - 21s - loss: 0.6354 - accuracy: 0.6816 - val_loss: 0.4510 - val_accuracy: 0.8189 - 21s/epoch - 66ms/step Epoch 9/10 312/312 - 21s - loss: 0.6115 - accuracy: 0.6967 - val_loss: 0.4416 - val_accuracy: 0.8237 - 21s/epoch - 66ms/step Epoch 10/10 312/312 - 21s - loss: 0.6004 - accuracy: 0.7016 - val_loss: 0.4304 - val_accuracy: 0.8283 - 21s/epoch - 66ms/step Epoch 1/10 312/312 - 22s - loss: 0.5321 - accuracy: 0.7443 - val_loss: 0.3758 - val_accuracy: 0.8578 - 22s/epoch - 72ms/step Epoch 2/10 312/312 - 21s - loss: 0.4004 - accuracy: 0.8280 - val_loss: 0.3324 - val_accuracy: 0.8554 - 21s/epoch - 68ms/step Epoch 3/10 312/312 - 21s - loss: 0.3661 - accuracy: 0.8437 - val_loss: 0.2733 - val_accuracy: 0.8948 - 21s/epoch - 67ms/step Epoch 4/10 312/312 - 21s - loss: 0.3446 - accuracy: 0.8538 - val_loss: 0.2651 - val_accuracy: 0.8956 - 21s/epoch - 66ms/step Epoch 5/10 312/312 - 21s - loss: 0.3296 - accuracy: 0.8656 - val_loss: 0.2570 - val_accuracy: 0.8980 - 21s/epoch - 67ms/step Epoch 6/10 312/312 - 21s - loss: 0.3203 - accuracy: 0.8695 - val_loss: 0.2466 - val_accuracy: 0.9034 - 21s/epoch - 66ms/step Epoch 7/10 312/312 - 21s - loss: 0.3117 - accuracy: 0.8747 - val_loss: 0.2167 - val_accuracy: 0.9235 - 21s/epoch - 66ms/step Epoch 8/10 312/312 - 21s - loss: 0.3074 - accuracy: 0.8774 - val_loss: 0.2383 - val_accuracy: 0.9101 - 21s/epoch - 66ms/step Epoch 9/10 312/312 - 21s - loss: 0.2985 - accuracy: 0.8825 - val_loss: 0.2239 - val_accuracy: 0.9155 - 21s/epoch - 66ms/step Epoch 10/10 312/312 - 21s - loss: 0.2957 - accuracy: 0.8800 - val_loss: 0.2043 - val_accuracy: 0.9199 - 21s/epoch - 67ms/step Epoch 1/10 312/312 - 22s - loss: 0.3615 - accuracy: 0.8461 - val_loss: 0.2124 - val_accuracy: 0.9235 - 22s/epoch - 70ms/step Epoch 2/10 312/312 - 20s - loss: 0.2813 - accuracy: 0.8872 - val_loss: 0.4128 - val_accuracy: 0.7829 - 20s/epoch - 65ms/step Epoch 3/10 312/312 - 20s - loss: 0.2668 - accuracy: 0.8952 - val_loss: 0.1965 - val_accuracy: 0.9279 - 20s/epoch - 66ms/step Epoch 4/10 312/312 - 20s - loss: 0.2571 - accuracy: 0.8978 - val_loss: 0.1682 - val_accuracy: 0.9391 - 20s/epoch - 66ms/step Epoch 5/10 312/312 - 20s - loss: 0.2531 - accuracy: 0.9000 - val_loss: 0.2151 - val_accuracy: 0.9133 - 20s/epoch - 66ms/step Epoch 6/10 312/312 - 21s - loss: 0.2521 - accuracy: 0.8988 - val_loss: 0.1689 - val_accuracy: 0.9415 - 21s/epoch - 66ms/step Epoch 7/10 312/312 - 21s - loss: 0.2447 - accuracy: 0.9041 - val_loss: 0.1698 - val_accuracy: 0.9343 - 21s/epoch - 66ms/step Epoch 8/10 312/312 - 21s - loss: 0.2438 - accuracy: 0.9055 - val_loss: 0.1816 - val_accuracy: 0.9327 - 21s/epoch - 66ms/step Epoch 9/10 312/312 - 21s - loss: 0.2403 - accuracy: 0.9058 - val_loss: 0.1529 - val_accuracy: 0.9427 - 21s/epoch - 66ms/step Epoch 10/10 312/312 - 20s - loss: 0.2322 - accuracy: 0.9098 - val_loss: 0.1539 - val_accuracy: 0.9407 - 20s/epoch - 66ms/step Epoch 1/10 312/312 - 22s - loss: 0.3507 - accuracy: 0.8524 - val_loss: 0.2859 - val_accuracy: 0.8896 - 22s/epoch - 71ms/step Epoch 2/10 312/312 - 21s - loss: 0.2764 - accuracy: 0.8904 - val_loss: 0.1715 - val_accuracy: 0.9321 - 21s/epoch - 66ms/step Epoch 3/10 312/312 - 21s - loss: 0.2677 - accuracy: 0.8941 - val_loss: 0.2080 - val_accuracy: 0.9221 - 21s/epoch - 67ms/step Epoch 4/10 312/312 - 21s - loss: 0.2608 - accuracy: 0.8952 - val_loss: 0.1883 - val_accuracy: 0.9261 - 21s/epoch - 67ms/step Epoch 5/10 312/312 - 21s - loss: 0.2506 - accuracy: 0.9014 - val_loss: 0.2175 - val_accuracy: 0.9193 - 21s/epoch - 66ms/step Epoch 6/10 312/312 - 21s - loss: 0.2509 - accuracy: 0.9012 - val_loss: 0.1634 - val_accuracy: 0.9363 - 21s/epoch - 67ms/step Epoch 7/10 312/312 - 21s - loss: 0.2399 - accuracy: 0.9071 - val_loss: 0.1702 - val_accuracy: 0.9395 - 21s/epoch - 66ms/step Epoch 8/10 312/312 - 21s - loss: 0.2447 - accuracy: 0.9031 - val_loss: 0.1625 - val_accuracy: 0.9385 - 21s/epoch - 66ms/step Epoch 9/10 312/312 - 21s - loss: 0.2391 - accuracy: 0.9051 - val_loss: 0.1998 - val_accuracy: 0.9233 - 21s/epoch - 67ms/step Epoch 10/10 312/312 - 21s - loss: 0.2364 - accuracy: 0.9071 - val_loss: 0.1981 - val_accuracy: 0.9249 - 21s/epoch - 67ms/step Epoch 1/10 312/312 - 22s - loss: 0.4310 - accuracy: 0.8047 - val_loss: 0.2744 - val_accuracy: 0.8976 - 22s/epoch - 70ms/step Epoch 2/10 312/312 - 21s - loss: 0.3364 - accuracy: 0.8595 - val_loss: 0.2055 - val_accuracy: 0.9193 - 21s/epoch - 66ms/step Epoch 3/10 312/312 - 21s - loss: 0.3048 - accuracy: 0.8763 - val_loss: 0.2161 - val_accuracy: 0.9145 - 21s/epoch - 67ms/step Epoch 4/10 312/312 - 21s - loss: 0.2877 - accuracy: 0.8821 - val_loss: 0.1762 - val_accuracy: 0.9323 - 21s/epoch - 67ms/step Epoch 5/10 312/312 - 21s - loss: 0.2767 - accuracy: 0.8902 - val_loss: 0.1794 - val_accuracy: 0.9297 - 21s/epoch - 67ms/step Epoch 6/10 312/312 - 21s - loss: 0.2723 - accuracy: 0.8905 - val_loss: 0.1751 - val_accuracy: 0.9363 - 21s/epoch - 67ms/step Epoch 7/10 312/312 - 21s - loss: 0.2637 - accuracy: 0.8952 - val_loss: 0.2863 - val_accuracy: 0.8846 - 21s/epoch - 67ms/step Epoch 8/10 312/312 - 21s - loss: 0.2691 - accuracy: 0.8927 - val_loss: 0.1708 - val_accuracy: 0.9337 - 21s/epoch - 67ms/step Epoch 9/10 312/312 - 21s - loss: 0.2698 - accuracy: 0.8909 - val_loss: 0.2136 - val_accuracy: 0.9113 - 21s/epoch - 67ms/step Epoch 10/10 312/312 - 21s - loss: 0.2598 - accuracy: 0.8981 - val_loss: 0.1942 - val_accuracy: 0.9267 - 21s/epoch - 68ms/step Epoch 1/10 312/312 - 22s - loss: 0.3792 - accuracy: 0.8344 - val_loss: 0.2312 - val_accuracy: 0.9249 - 22s/epoch - 70ms/step Epoch 2/10 312/312 - 21s - loss: 0.2934 - accuracy: 0.8816 - val_loss: 0.1877 - val_accuracy: 0.9347 - 21s/epoch - 67ms/step Epoch 3/10 312/312 - 21s - loss: 0.2767 - accuracy: 0.8887 - val_loss: 0.1683 - val_accuracy: 0.9353 - 21s/epoch - 66ms/step Epoch 4/10 312/312 - 21s - loss: 0.2623 - accuracy: 0.8958 - val_loss: 0.1701 - val_accuracy: 0.9347 - 21s/epoch - 67ms/step Epoch 5/10 312/312 - 21s - loss: 0.2598 - accuracy: 0.8993 - val_loss: 0.1619 - val_accuracy: 0.9429 - 21s/epoch - 66ms/step Epoch 6/10 312/312 - 21s - loss: 0.2562 - accuracy: 0.8982 - val_loss: 0.1690 - val_accuracy: 0.9369 - 21s/epoch - 66ms/step Epoch 7/10 312/312 - 21s - loss: 0.2534 - accuracy: 0.9013 - val_loss: 0.1837 - val_accuracy: 0.9285 - 21s/epoch - 66ms/step Epoch 8/10 312/312 - 22s - loss: 0.2486 - accuracy: 0.9024 - val_loss: 0.3054 - val_accuracy: 0.8650 - 22s/epoch - 69ms/step Epoch 9/10 312/312 - 21s - loss: 0.2496 - accuracy: 0.9006 - val_loss: 0.1594 - val_accuracy: 0.9387 - 21s/epoch - 66ms/step Epoch 10/10 312/312 - 23s - loss: 0.2418 - accuracy: 0.9064 - val_loss: 0.1636 - val_accuracy: 0.9427 - 23s/epoch - 73ms/step
df_optimizer_comp.insert(loc=5,
column='Epoch',
value=0)
# iterate through each row
Epoch = 0
i=0
for i in range(len(df_optimizer_comp)-1):
if df_optimizer_comp.iat[i,0] == df_optimizer_comp.iat[(i+1),0]:
Epoch = Epoch +1
df_optimizer_comp.at[(i+1),"Epoch"]=Epoch
else:
Epoch = 0
df_optimizer_comp.at[(i+1),"Epoch"]=Epoch
df_optimizer_comp
| Optimizer | Accuracy | Loss | Val_accuracy | Val_Loss | Epoch | |
|---|---|---|---|---|---|---|
| 0 | Adadelta | 0.543424 | 0.881049 | 0.628405 | 0.881049 | 0 |
| 1 | Adadelta | 0.574877 | 0.817288 | 0.652644 | 0.817288 | 1 |
| 2 | Adadelta | 0.606782 | 0.760786 | 0.706330 | 0.760786 | 2 |
| 3 | Adadelta | 0.623410 | 0.732203 | 0.750401 | 0.732203 | 3 |
| 4 | Adadelta | 0.640088 | 0.707544 | 0.780449 | 0.707544 | 4 |
| 5 | Adadelta | 0.656867 | 0.677688 | 0.799479 | 0.677688 | 5 |
| 6 | Adadelta | 0.672694 | 0.654690 | 0.809295 | 0.654690 | 6 |
| 7 | Adadelta | 0.681609 | 0.635413 | 0.818910 | 0.635413 | 7 |
| 8 | Adadelta | 0.696684 | 0.611453 | 0.823718 | 0.611453 | 8 |
| 9 | Adadelta | 0.701643 | 0.600435 | 0.828325 | 0.600435 | 9 |
| 10 | Adagrad | 0.744315 | 0.532149 | 0.857772 | 0.532149 | 0 |
| 11 | Adagrad | 0.828008 | 0.400361 | 0.855369 | 0.400361 | 1 |
| 12 | Adagrad | 0.843684 | 0.366071 | 0.894832 | 0.366071 | 2 |
| 13 | Adagrad | 0.853801 | 0.344611 | 0.895633 | 0.344611 | 3 |
| 14 | Adagrad | 0.865571 | 0.329646 | 0.898037 | 0.329646 | 4 |
| 15 | Adagrad | 0.869528 | 0.320302 | 0.903446 | 0.320302 | 5 |
| 16 | Adagrad | 0.874737 | 0.311719 | 0.923478 | 0.311719 | 6 |
| 17 | Adagrad | 0.877442 | 0.307354 | 0.910056 | 0.307354 | 7 |
| 18 | Adagrad | 0.882450 | 0.298537 | 0.915465 | 0.298537 | 8 |
| 19 | Adagrad | 0.879996 | 0.295673 | 0.919872 | 0.295673 | 9 |
| 20 | Adam | 0.846088 | 0.361537 | 0.923478 | 0.361537 | 0 |
| 21 | Adam | 0.887208 | 0.281323 | 0.782853 | 0.281323 | 1 |
| 22 | Adam | 0.895172 | 0.266842 | 0.927885 | 0.266842 | 2 |
| 23 | Adam | 0.897826 | 0.257056 | 0.939103 | 0.257056 | 3 |
| 24 | Adam | 0.899980 | 0.253121 | 0.913261 | 0.253121 | 4 |
| 25 | Adam | 0.898828 | 0.252128 | 0.941506 | 0.252128 | 5 |
| 26 | Adam | 0.904087 | 0.244739 | 0.934295 | 0.244739 | 6 |
| 27 | Adam | 0.905539 | 0.243824 | 0.932692 | 0.243824 | 7 |
| 28 | Adam | 0.905840 | 0.240308 | 0.942708 | 0.240308 | 8 |
| 29 | Adam | 0.909797 | 0.232170 | 0.940705 | 0.232170 | 9 |
| 30 | RMSprop | 0.852399 | 0.350698 | 0.889623 | 0.350698 | 0 |
| 31 | RMSprop | 0.890364 | 0.276424 | 0.932091 | 0.276424 | 1 |
| 32 | RMSprop | 0.894070 | 0.267676 | 0.922075 | 0.267676 | 2 |
| 33 | RMSprop | 0.895172 | 0.260797 | 0.926082 | 0.260797 | 3 |
| 34 | RMSprop | 0.901432 | 0.250562 | 0.919271 | 0.250562 | 4 |
| 35 | RMSprop | 0.901232 | 0.250908 | 0.936298 | 0.250908 | 5 |
| 36 | RMSprop | 0.907092 | 0.239861 | 0.939503 | 0.239861 | 6 |
| 37 | RMSprop | 0.903085 | 0.244733 | 0.938502 | 0.244733 | 7 |
| 38 | RMSprop | 0.905139 | 0.239133 | 0.923277 | 0.239133 | 8 |
| 39 | RMSprop | 0.907092 | 0.236354 | 0.924880 | 0.236354 | 9 |
| 40 | SGD | 0.804668 | 0.430959 | 0.897636 | 0.430959 | 0 |
| 41 | SGD | 0.859511 | 0.336394 | 0.919271 | 0.336394 | 1 |
| 42 | SGD | 0.876290 | 0.304783 | 0.914463 | 0.304783 | 2 |
| 43 | SGD | 0.882100 | 0.287702 | 0.932292 | 0.287702 | 3 |
| 44 | SGD | 0.890163 | 0.276735 | 0.929688 | 0.276735 | 4 |
| 45 | SGD | 0.890464 | 0.272284 | 0.936298 | 0.272284 | 5 |
| 46 | SGD | 0.895222 | 0.263745 | 0.884615 | 0.263745 | 6 |
| 47 | SGD | 0.892718 | 0.269073 | 0.933694 | 0.269073 | 7 |
| 48 | SGD | 0.890864 | 0.269843 | 0.911258 | 0.269843 | 8 |
| 49 | SGD | 0.898077 | 0.259751 | 0.926683 | 0.259751 | 9 |
| 50 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.834368 | 0.379209 | 0.924880 | 0.379209 | 0 |
| 51 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.881599 | 0.293390 | 0.934696 | 0.293390 | 1 |
| 52 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.888661 | 0.276738 | 0.935296 | 0.276738 | 2 |
| 53 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.895823 | 0.262338 | 0.934696 | 0.262338 | 3 |
| 54 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.899329 | 0.259820 | 0.942909 | 0.259820 | 4 |
| 55 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.898177 | 0.256199 | 0.936899 | 0.256199 | 5 |
| 56 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.901332 | 0.253404 | 0.928486 | 0.253404 | 6 |
| 57 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.902434 | 0.248628 | 0.864984 | 0.248628 | 7 |
| 58 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.900581 | 0.249562 | 0.938702 | 0.249562 | 8 |
| 59 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.906391 | 0.241799 | 0.942708 | 0.241799 | 9 |
fig, ax =plt.subplots(1,2)
sns.lineplot('Epoch', 'Accuracy', data=df_optimizer_comp, hue="Optimizer",style="Optimizer", palette="tab10", linewidth=2.5, ax=ax[0]).set(title='VGG16 Model w/ Augmentation: Training Accuracy')
sns.lineplot('Epoch', 'Val_accuracy', data=df_optimizer_comp, hue="Optimizer",style="Optimizer", palette="tab10", linewidth=2.5, ax=ax[1]).set(title='VGG16 Model w/ Augmentation: Validation Accuracy')
plt.yticks(np.arange(0.50, 1.00, 0.05))
fig.set_size_inches(16,10)
fig.show()
accuracy5 = model5.evaluate(test_images, test_labels, verbose=1)
print('\n', 'Test_Accuracy:-', accuracy4[1])
82/82 [==============================] - 1s 12ms/step - loss: 0.1989 - accuracy: 0.9227 Test_Accuracy:- 0.954230785369873
# Evaluating the model on test data
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
pred5 = model5.predict(test_images)
pred5 = np.argmax(pred5,axis = 1)
y_true5 = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true5,pred5))
#Plotting the heatmap using confusion matrix
cm5 = confusion_matrix(y_true5,pred5)
plt.figure(figsize=(8,5))
sns.heatmap(cm5, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.96 0.88 0.92 1300
1 0.89 0.96 0.93 1300
accuracy 0.92 2600
macro avg 0.93 0.92 0.92 2600
weighted avg 0.93 0.92 0.92 2600
Observations: Model 5
| Test Accuracy | False negative out of 1,300 |
False Positive out of 1,300 |
|
|---|---|---|---|
| Base | 0.97 | 41 | 28 |
| Model 1 | 0.95 | 65 | 61 |
| Model 2 | 0.98 | 22 | 33 |
| Model 3 | 0.98 | 23 | 25 |
| Model 4 | 0.95 | 65 | 54 |
| Model 5 | 0.92 | 47 | 154 |
Reference: Some Inception code from https://www.analyticsvidhya.com/blog/2020/08/top-4-pre-trained-models-for-image-classification-with-python-code/
#Importing libraries required to load the data
import zipfile
import os
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, BatchNormalization, Dropout, Flatten, LeakyReLU, GlobalAvgPool2D
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers
#to ignore warnings
import warnings
warnings.filterwarnings('ignore')
# Remove the limit from the number of displayed columns and rows. It helps to see the entire dataframe while printing it
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", 200)
#Storing the path of the data file from the Google drive
path = '/content/drive/Othercomputers/My Laptop/!Mike_Sync/!MIT_Applied_Data_Science/7_Capstone_Project/cell_images.zip'
#The data is provided as a zip file so we need to extract the files from the zip file
with zipfile.ZipFile(path, 'r') as zip_ref:
zip_ref.extractall()
#Storing the path of the extracted "train" folder
train_dir = '/content/cell_images/train'
#Size of image so that each image has the same size
SIZE = 150
#Empty list to store the training images after they are converted to NumPy arrays
train_images = []
#Empty list to store the training labels (0 - uninfected, 1 - parasitized)
train_labels = []
#We will run the same code for "parasitized" as well as "uninfected" folders within the "train" folder
for folder_name in ['/parasitized/', '/uninfected/']:
#Path of the folder
images_path = os.listdir(train_dir + folder_name)
for i, image_name in enumerate(images_path):
try:
#Opening each image using the path of that image
image = Image.open(train_dir + folder_name + image_name)
#Resizing each image to (64,64)
image = image.resize((SIZE, SIZE))
#Converting images to arrays and appending that array to the empty list defined above
train_images.append(np.array(image))
#Creating labels for parasitized and uninfected images
if folder_name=='/parasitized/':
train_labels.append(1)
else:
train_labels.append(0)
except Exception:
pass
#Converting lists to arrays
train_images = np.array(train_images)
train_labels = np.array(train_labels)
#Storing the path of the extracted "test" folder
test_dir = '/content/cell_images/test'
#Size of image so that each image has the same size (it must be same as the train image size)
SIZE = 150
#Empty list to store the testing images after they are converted to NumPy arrays
test_images = []
#Empty list to store the testing labels (0 - uninfected, 1 - parasitized)
test_labels = []
#We will run the same code for "parasitized" as well as "uninfected" folders within the "test" folder
for folder_name in ['/parasitized/', '/uninfected/']:
#Path of the folder
images_path = os.listdir(test_dir + folder_name)
for i, image_name in enumerate(images_path):
try:
#Opening each image using the path of that image
image = Image.open(test_dir + folder_name + image_name)
#Resizing each image to (64,64)!!!!150,150!!!!!!!!!!!!!!!!!!!!!!
image = image.resize((SIZE, SIZE))
#Converting images to arrays and appending that array to the empty list defined above
test_images.append(np.array(image))
#Creating labels for parasitized and uninfected images
if folder_name=='/parasitized/':
test_labels.append(1)
else:
test_labels.append(0)
except Exception:
pass
#Converting lists to arrays
test_images = np.array(test_images)
test_labels = np.array(test_labels)
len(test_images)
2600
# try to normalize the train and test images by dividing it by 255 and convert them to float32 using astype function
train_images = (train_images/255).astype('float32')
test_images = (test_images/255).astype('float32')
As we have done our preprocessing required and performed some EDA to gain some insights in our Milestone-1 so now we will try to build our model and try evaluating its performance.
# Encoding Train Labels
train_labels=to_categorical(train_labels,2)
# Similarly let us try to encode test labels
test_labels=to_categorical(test_labels,2)
# print(train_labels)
#print(test_labels)
# function to plot train and validation accuracy
def plot_accuracy(history):
N = len(history.history["accuracy"])
plt.figure(figsize=(7,7))
plt.plot(np.arange(0, N), history.history["accuracy"], label="train_accuracy", ls='--')
plt.plot(np.arange(0, N), history.history["val_accuracy"], label="val_accuracy", ls='--')
plt.title("Accuracy vs Epoch")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(loc="upper left")
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_images, train_labels, test_size=0.2, random_state=42)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Using ImageDataGenerator to generate images
# Add Noise
def add_noise(img):
VARIABILITY = 0.1 # customize this
deviation = VARIABILITY*np.random.random()
noise = np.random.normal(0, deviation, img.shape)
img += noise
#img = np.clip(img, 0., 255.)
return img
datagen_aug = ImageDataGenerator(
#rescale=1./255,
# customize these and other parameters
rotation_range=45,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0,
zoom_range=0,
#preprocessing_function=add_noise,
horizontal_flip=True,
fill_mode='nearest',
)
val_datagen = ImageDataGenerator()
# Flowing training images using train_datagen generator
train_generator = datagen_aug.flow(x = X_train, y = y_train, batch_size=64, seed=42, shuffle=True)
# Flowing validation images using val_datagen generator
val_generator = val_datagen.flow(x= X_val, y = y_val, batch_size=64, seed=42, shuffle=True)
#Clearing backend
from tensorflow.keras import backend
backend.clear_session()
#Fixing the seed for random number generators
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
#Loading the Base Model
from tensorflow.keras.applications.inception_v3 import InceptionV3
base_model = InceptionV3(input_shape = (150, 150, 3), include_top = False, weights = 'imagenet')
#Compile and Fit
for layer in base_model.layers:
layer.trainable = False
def build_model(optimizer):
from tensorflow.keras import layers
from tensorflow.keras.optimizers import RMSprop
x = layers.Flatten()(base_model.output)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dense(128, activation='relu')(x)
x = layers.Dropout(0.3)(x)
x = layers.Dense(64, activation='relu')(x)
# Add a final sigmoid layer with 2 node for classification output
x = layers.Dense(2, activation='sigmoid')(x)
model = tf.keras.models.Model(base_model.input, x)
##optimizer = RMSprop(lr=0.0001)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model
#Loop Through Optimizers
df_optimizer_comp = pd.DataFrame(columns=['Optimizer', 'Accuracy', 'Loss', 'Val_accuracy', 'Val_Loss'])
optimizer = ['Adadelta', 'Adagrad', 'Adam', 'RMSprop', 'SGD', tf.keras.optimizers.Adamax(learning_rate = 0.0005)]
#inc_history = model.fit_generator(train_generator, validation_data = val_generator, steps_per_epoch = 100, epochs = 2)
for i in optimizer:
model = build_model(i)
#Fitting the model and running the model for 10 epochs
epochs=10
history = model.fit(
train_generator, validation_data=val_generator,
epochs=epochs,
batch_size=32,
#validation_split=0.1,
verbose=2
)
temp_df = pd.DataFrame(
{
'Optimizer': i,
#'Epoch': history.history['Epoch']
'Accuracy': history.history['accuracy'],
'Loss': history.history['loss'],
'Val_accuracy': history.history['val_accuracy'],
'Val_Loss': history.history['loss']
}
)
df_optimizer_comp = df_optimizer_comp.append(temp_df, ignore_index=True)
# Delete multiple columns from the dataframe
temp_df = temp_df.drop(['Optimizer', 'Accuracy', 'Loss', 'Val_accuracy', 'Val_Loss'], axis=1)
Epoch 1/10 312/312 - 101s - loss: 0.6091 - accuracy: 0.6556 - val_loss: 0.4677 - val_accuracy: 0.8289 - 101s/epoch - 325ms/step Epoch 2/10 312/312 - 89s - loss: 0.4252 - accuracy: 0.8095 - val_loss: 0.3639 - val_accuracy: 0.8706 - 89s/epoch - 287ms/step Epoch 3/10 312/312 - 89s - loss: 0.3675 - accuracy: 0.8483 - val_loss: 0.3155 - val_accuracy: 0.8908 - 89s/epoch - 286ms/step Epoch 4/10 312/312 - 89s - loss: 0.3389 - accuracy: 0.8646 - val_loss: 0.2891 - val_accuracy: 0.8984 - 89s/epoch - 285ms/step Epoch 5/10 312/312 - 89s - loss: 0.3188 - accuracy: 0.8714 - val_loss: 0.2708 - val_accuracy: 0.9081 - 89s/epoch - 285ms/step Epoch 6/10 312/312 - 89s - loss: 0.3075 - accuracy: 0.8780 - val_loss: 0.2578 - val_accuracy: 0.9105 - 89s/epoch - 286ms/step Epoch 7/10 312/312 - 89s - loss: 0.2954 - accuracy: 0.8844 - val_loss: 0.2402 - val_accuracy: 0.9163 - 89s/epoch - 286ms/step Epoch 8/10 312/312 - 89s - loss: 0.2935 - accuracy: 0.8870 - val_loss: 0.2323 - val_accuracy: 0.9181 - 89s/epoch - 285ms/step Epoch 9/10 312/312 - 89s - loss: 0.2866 - accuracy: 0.8888 - val_loss: 0.2261 - val_accuracy: 0.9203 - 89s/epoch - 284ms/step Epoch 10/10 312/312 - 90s - loss: 0.2830 - accuracy: 0.8896 - val_loss: 0.2213 - val_accuracy: 0.9187 - 90s/epoch - 287ms/step Epoch 1/10 312/312 - 98s - loss: 0.3374 - accuracy: 0.8611 - val_loss: 0.2038 - val_accuracy: 0.9281 - 98s/epoch - 316ms/step Epoch 2/10 312/312 - 90s - loss: 0.2638 - accuracy: 0.8968 - val_loss: 0.1788 - val_accuracy: 0.9323 - 90s/epoch - 288ms/step Epoch 3/10 312/312 - 89s - loss: 0.2512 - accuracy: 0.9023 - val_loss: 0.1769 - val_accuracy: 0.9313 - 89s/epoch - 286ms/step Epoch 4/10 312/312 - 89s - loss: 0.2402 - accuracy: 0.9080 - val_loss: 0.1615 - val_accuracy: 0.9385 - 89s/epoch - 284ms/step Epoch 5/10 312/312 - 89s - loss: 0.2314 - accuracy: 0.9096 - val_loss: 0.1555 - val_accuracy: 0.9409 - 89s/epoch - 286ms/step Epoch 6/10 312/312 - 89s - loss: 0.2283 - accuracy: 0.9123 - val_loss: 0.1615 - val_accuracy: 0.9389 - 89s/epoch - 284ms/step Epoch 7/10 312/312 - 89s - loss: 0.2284 - accuracy: 0.9131 - val_loss: 0.1494 - val_accuracy: 0.9427 - 89s/epoch - 286ms/step Epoch 8/10 312/312 - 89s - loss: 0.2235 - accuracy: 0.9134 - val_loss: 0.1509 - val_accuracy: 0.9417 - 89s/epoch - 285ms/step Epoch 9/10 312/312 - 89s - loss: 0.2194 - accuracy: 0.9152 - val_loss: 0.1465 - val_accuracy: 0.9429 - 89s/epoch - 286ms/step Epoch 10/10 312/312 - 89s - loss: 0.2230 - accuracy: 0.9134 - val_loss: 0.1464 - val_accuracy: 0.9443 - 89s/epoch - 285ms/step Epoch 1/10 312/312 - 95s - loss: 0.4676 - accuracy: 0.8540 - val_loss: 0.1803 - val_accuracy: 0.9319 - 95s/epoch - 306ms/step Epoch 2/10 312/312 - 89s - loss: 0.2648 - accuracy: 0.8979 - val_loss: 0.1809 - val_accuracy: 0.9363 - 89s/epoch - 285ms/step Epoch 3/10 312/312 - 88s - loss: 0.2488 - accuracy: 0.9031 - val_loss: 0.1709 - val_accuracy: 0.9355 - 88s/epoch - 283ms/step Epoch 4/10 312/312 - 88s - loss: 0.2431 - accuracy: 0.9072 - val_loss: 0.1569 - val_accuracy: 0.9409 - 88s/epoch - 283ms/step Epoch 5/10 312/312 - 89s - loss: 0.2254 - accuracy: 0.9135 - val_loss: 0.1630 - val_accuracy: 0.9423 - 89s/epoch - 284ms/step Epoch 6/10 312/312 - 88s - loss: 0.2209 - accuracy: 0.9141 - val_loss: 0.1571 - val_accuracy: 0.9431 - 88s/epoch - 283ms/step Epoch 7/10 312/312 - 89s - loss: 0.2205 - accuracy: 0.9158 - val_loss: 0.1657 - val_accuracy: 0.9463 - 89s/epoch - 284ms/step Epoch 8/10 312/312 - 89s - loss: 0.2123 - accuracy: 0.9181 - val_loss: 0.1432 - val_accuracy: 0.9455 - 89s/epoch - 284ms/step Epoch 9/10 312/312 - 89s - loss: 0.2204 - accuracy: 0.9148 - val_loss: 0.1795 - val_accuracy: 0.9365 - 89s/epoch - 284ms/step Epoch 10/10 312/312 - 89s - loss: 0.2132 - accuracy: 0.9173 - val_loss: 0.1623 - val_accuracy: 0.9463 - 89s/epoch - 286ms/step Epoch 1/10 312/312 - 95s - loss: 0.8634 - accuracy: 0.8067 - val_loss: 0.1891 - val_accuracy: 0.9309 - 95s/epoch - 304ms/step Epoch 2/10 312/312 - 89s - loss: 0.3118 - accuracy: 0.8805 - val_loss: 0.1743 - val_accuracy: 0.9309 - 89s/epoch - 284ms/step Epoch 3/10 312/312 - 89s - loss: 0.2804 - accuracy: 0.8944 - val_loss: 0.1950 - val_accuracy: 0.9401 - 89s/epoch - 284ms/step Epoch 4/10 312/312 - 89s - loss: 0.2657 - accuracy: 0.8988 - val_loss: 0.2141 - val_accuracy: 0.9187 - 89s/epoch - 284ms/step Epoch 5/10 312/312 - 88s - loss: 0.2491 - accuracy: 0.9069 - val_loss: 0.1861 - val_accuracy: 0.9367 - 88s/epoch - 283ms/step Epoch 6/10 312/312 - 89s - loss: 0.2422 - accuracy: 0.9065 - val_loss: 0.1581 - val_accuracy: 0.9413 - 89s/epoch - 285ms/step Epoch 7/10 312/312 - 89s - loss: 0.2355 - accuracy: 0.9102 - val_loss: 0.1720 - val_accuracy: 0.9411 - 89s/epoch - 284ms/step Epoch 8/10 312/312 - 88s - loss: 0.2322 - accuracy: 0.9125 - val_loss: 0.1645 - val_accuracy: 0.9417 - 88s/epoch - 283ms/step Epoch 9/10 312/312 - 88s - loss: 0.2320 - accuracy: 0.9136 - val_loss: 0.1616 - val_accuracy: 0.9457 - 88s/epoch - 282ms/step Epoch 10/10 312/312 - 88s - loss: 0.2247 - accuracy: 0.9199 - val_loss: 0.1866 - val_accuracy: 0.9395 - 88s/epoch - 282ms/step Epoch 1/10 312/312 - 93s - loss: 0.3390 - accuracy: 0.8618 - val_loss: 0.2051 - val_accuracy: 0.9235 - 93s/epoch - 298ms/step Epoch 2/10 312/312 - 89s - loss: 0.2548 - accuracy: 0.8978 - val_loss: 0.1662 - val_accuracy: 0.9351 - 89s/epoch - 284ms/step Epoch 3/10 312/312 - 90s - loss: 0.2371 - accuracy: 0.9071 - val_loss: 0.1818 - val_accuracy: 0.9279 - 90s/epoch - 288ms/step Epoch 4/10 312/312 - 90s - loss: 0.2333 - accuracy: 0.9098 - val_loss: 0.1509 - val_accuracy: 0.9405 - 90s/epoch - 289ms/step Epoch 5/10 312/312 - 90s - loss: 0.2262 - accuracy: 0.9138 - val_loss: 0.1485 - val_accuracy: 0.9425 - 90s/epoch - 288ms/step Epoch 6/10 312/312 - 90s - loss: 0.2254 - accuracy: 0.9125 - val_loss: 0.1477 - val_accuracy: 0.9443 - 90s/epoch - 287ms/step Epoch 7/10 312/312 - 89s - loss: 0.2280 - accuracy: 0.9140 - val_loss: 0.1427 - val_accuracy: 0.9449 - 89s/epoch - 286ms/step Epoch 8/10 312/312 - 89s - loss: 0.2197 - accuracy: 0.9152 - val_loss: 0.1459 - val_accuracy: 0.9445 - 89s/epoch - 285ms/step Epoch 9/10 312/312 - 90s - loss: 0.2152 - accuracy: 0.9167 - val_loss: 0.1489 - val_accuracy: 0.9439 - 90s/epoch - 287ms/step Epoch 10/10 312/312 - 89s - loss: 0.2169 - accuracy: 0.9144 - val_loss: 0.1398 - val_accuracy: 0.9475 - 89s/epoch - 284ms/step Epoch 1/10 312/312 - 94s - loss: 0.3444 - accuracy: 0.8700 - val_loss: 0.1802 - val_accuracy: 0.9323 - 94s/epoch - 302ms/step Epoch 2/10 312/312 - 89s - loss: 0.2522 - accuracy: 0.9038 - val_loss: 0.1931 - val_accuracy: 0.9279 - 89s/epoch - 285ms/step Epoch 3/10 312/312 - 89s - loss: 0.2399 - accuracy: 0.9072 - val_loss: 0.1518 - val_accuracy: 0.9421 - 89s/epoch - 284ms/step Epoch 4/10 312/312 - 88s - loss: 0.2357 - accuracy: 0.9083 - val_loss: 0.1506 - val_accuracy: 0.9401 - 88s/epoch - 283ms/step Epoch 5/10 312/312 - 88s - loss: 0.2360 - accuracy: 0.9080 - val_loss: 0.1445 - val_accuracy: 0.9453 - 88s/epoch - 282ms/step Epoch 6/10 312/312 - 88s - loss: 0.2239 - accuracy: 0.9114 - val_loss: 0.1487 - val_accuracy: 0.9433 - 88s/epoch - 281ms/step Epoch 7/10 312/312 - 88s - loss: 0.2214 - accuracy: 0.9146 - val_loss: 0.1443 - val_accuracy: 0.9449 - 88s/epoch - 282ms/step Epoch 8/10 312/312 - 89s - loss: 0.2191 - accuracy: 0.9144 - val_loss: 0.1406 - val_accuracy: 0.9447 - 89s/epoch - 285ms/step Epoch 9/10 312/312 - 89s - loss: 0.2181 - accuracy: 0.9141 - val_loss: 0.1436 - val_accuracy: 0.9425 - 89s/epoch - 284ms/step Epoch 10/10 312/312 - 88s - loss: 0.2170 - accuracy: 0.9167 - val_loss: 0.1434 - val_accuracy: 0.9463 - 88s/epoch - 283ms/step
df_optimizer_comp.insert(loc=5,
column='Epoch',
value=0)
# iterate through each row
Epoch = 0
i=0
for i in range(len(df_optimizer_comp)-1):
if df_optimizer_comp.iat[i,0] == df_optimizer_comp.iat[(i+1),0]:
Epoch = Epoch +1
df_optimizer_comp.at[(i+1),"Epoch"]=Epoch
else:
Epoch = 0
df_optimizer_comp.at[(i+1),"Epoch"]=Epoch
df_optimizer_comp
| Optimizer | Accuracy | Loss | Val_accuracy | Val_Loss | Epoch | |
|---|---|---|---|---|---|---|
| 0 | Adadelta | 0.655615 | 0.609140 | 0.828926 | 0.609140 | 0 |
| 1 | Adadelta | 0.809476 | 0.425171 | 0.870593 | 0.425171 | 1 |
| 2 | Adadelta | 0.848342 | 0.367513 | 0.890825 | 0.367513 | 2 |
| 3 | Adadelta | 0.864570 | 0.338908 | 0.898438 | 0.338908 | 3 |
| 4 | Adadelta | 0.871431 | 0.318792 | 0.908053 | 0.318792 | 4 |
| 5 | Adadelta | 0.877993 | 0.307455 | 0.910457 | 0.307455 | 5 |
| 6 | Adadelta | 0.884403 | 0.295401 | 0.916266 | 0.295401 | 6 |
| 7 | Adadelta | 0.886958 | 0.293465 | 0.918069 | 0.293465 | 7 |
| 8 | Adadelta | 0.888761 | 0.286569 | 0.920272 | 0.286569 | 8 |
| 9 | Adadelta | 0.889562 | 0.282993 | 0.918670 | 0.282993 | 9 |
| 10 | Adagrad | 0.861114 | 0.337434 | 0.928085 | 0.337434 | 0 |
| 11 | Adagrad | 0.896825 | 0.263782 | 0.932292 | 0.263782 | 1 |
| 12 | Adagrad | 0.902334 | 0.251234 | 0.931290 | 0.251234 | 2 |
| 13 | Adagrad | 0.907994 | 0.240201 | 0.938502 | 0.240201 | 3 |
| 14 | Adagrad | 0.909596 | 0.231354 | 0.940905 | 0.231354 | 4 |
| 15 | Adagrad | 0.912251 | 0.228275 | 0.938902 | 0.228275 | 5 |
| 16 | Adagrad | 0.913052 | 0.228364 | 0.942708 | 0.228364 | 6 |
| 17 | Adagrad | 0.913403 | 0.223536 | 0.941707 | 0.223536 | 7 |
| 18 | Adagrad | 0.915206 | 0.219396 | 0.942909 | 0.219396 | 8 |
| 19 | Adagrad | 0.913353 | 0.222980 | 0.944311 | 0.222980 | 9 |
| 20 | Adam | 0.854002 | 0.467580 | 0.931891 | 0.467580 | 0 |
| 21 | Adam | 0.897876 | 0.264848 | 0.936298 | 0.264848 | 1 |
| 22 | Adam | 0.903085 | 0.248832 | 0.935497 | 0.248832 | 2 |
| 23 | Adam | 0.907242 | 0.243135 | 0.940905 | 0.243135 | 3 |
| 24 | Adam | 0.913453 | 0.225375 | 0.942308 | 0.225375 | 4 |
| 25 | Adam | 0.914054 | 0.220914 | 0.943109 | 0.220914 | 5 |
| 26 | Adam | 0.915757 | 0.220525 | 0.946314 | 0.220525 | 6 |
| 27 | Adam | 0.918111 | 0.212321 | 0.945513 | 0.212321 | 7 |
| 28 | Adam | 0.914755 | 0.220395 | 0.936498 | 0.220395 | 8 |
| 29 | Adam | 0.917259 | 0.213237 | 0.946314 | 0.213237 | 9 |
| 30 | RMSprop | 0.806721 | 0.863432 | 0.930889 | 0.863432 | 0 |
| 31 | RMSprop | 0.880547 | 0.311844 | 0.930889 | 0.311844 | 1 |
| 32 | RMSprop | 0.894421 | 0.280428 | 0.940104 | 0.280428 | 2 |
| 33 | RMSprop | 0.898778 | 0.265663 | 0.918670 | 0.265663 | 3 |
| 34 | RMSprop | 0.906892 | 0.249127 | 0.936699 | 0.249127 | 4 |
| 35 | RMSprop | 0.906491 | 0.242237 | 0.941306 | 0.242237 | 5 |
| 36 | RMSprop | 0.910247 | 0.235524 | 0.941106 | 0.235524 | 6 |
| 37 | RMSprop | 0.912451 | 0.232157 | 0.941707 | 0.232157 | 7 |
| 38 | RMSprop | 0.913603 | 0.232005 | 0.945713 | 0.232005 | 8 |
| 39 | RMSprop | 0.919864 | 0.224663 | 0.939503 | 0.224663 | 9 |
| 40 | SGD | 0.861765 | 0.339010 | 0.923478 | 0.339010 | 0 |
| 41 | SGD | 0.897826 | 0.254838 | 0.935096 | 0.254838 | 1 |
| 42 | SGD | 0.907142 | 0.237092 | 0.927885 | 0.237092 | 2 |
| 43 | SGD | 0.909797 | 0.233330 | 0.940505 | 0.233330 | 3 |
| 44 | SGD | 0.913753 | 0.226234 | 0.942508 | 0.226234 | 4 |
| 45 | SGD | 0.912451 | 0.225411 | 0.944311 | 0.225411 | 5 |
| 46 | SGD | 0.913954 | 0.228016 | 0.944912 | 0.228016 | 6 |
| 47 | SGD | 0.915156 | 0.219727 | 0.944511 | 0.219727 | 7 |
| 48 | SGD | 0.916658 | 0.215158 | 0.943910 | 0.215158 | 8 |
| 49 | SGD | 0.914354 | 0.216886 | 0.947516 | 0.216886 | 9 |
| 50 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.869979 | 0.344422 | 0.932292 | 0.344422 | 0 |
| 51 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.903786 | 0.252161 | 0.927885 | 0.252161 | 1 |
| 52 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.907192 | 0.239868 | 0.942107 | 0.239868 | 2 |
| 53 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.908294 | 0.235669 | 0.940104 | 0.235669 | 3 |
| 54 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.908044 | 0.236048 | 0.945312 | 0.236048 | 4 |
| 55 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.911399 | 0.223935 | 0.943309 | 0.223935 | 5 |
| 56 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.914555 | 0.221444 | 0.944912 | 0.221444 | 6 |
| 57 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.914405 | 0.219064 | 0.944712 | 0.219064 | 7 |
| 58 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.914104 | 0.218141 | 0.942508 | 0.218141 | 8 |
| 59 | <keras.optimizer_v2.adamax.Adamax object at 0x... | 0.916658 | 0.217018 | 0.946314 | 0.217018 | 9 |
fig, ax =plt.subplots(1,2)
sns.lineplot('Epoch', 'Accuracy', data=df_optimizer_comp, hue="Optimizer",style="Optimizer", palette="tab10", linewidth=2.5, ax=ax[0]).set(title='Inception Model w/Augmentation: Training Accuracy')
sns.lineplot('Epoch', 'Val_accuracy', data=df_optimizer_comp, hue="Optimizer",style="Optimizer", palette="tab10", linewidth=2.5, ax=ax[1]).set(title='Inception Model w/Augmentation: Validation Accuracy')
plt.yticks(np.arange(0.50, 1.00, 0.05))
fig.set_size_inches(16,10)
fig.show()
accuracy = model.evaluate(test_images, test_labels, verbose=1)
print('\n', 'Test_Accuracy:-', accuracy[1])
82/82 [==============================] - 4s 34ms/step - loss: 0.1622 - accuracy: 0.9381 Test_Accuracy:- 0.938076913356781
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
pred = model.predict(test_images)
pred = np.argmax(pred,axis = 1)
y_true = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true,pred))
#Plotting the heatmap using confusion matrix
cm = confusion_matrix(y_true,pred)
plt.figure(figsize=(8,5))
sns.heatmap(cm, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.94 0.94 0.94 1300
1 0.94 0.94 0.94 1300
accuracy 0.94 2600
macro avg 0.94 0.94 0.94 2600
weighted avg 0.94 0.94 0.94 2600
Observations: Model 6
| Test Accuracy | False negative out of 1,300 |
False Positive out of 1,300 |
|
|---|---|---|---|
| Base | 0.97 | 41 | 28 |
| Model 1 | 0.95 | 65 | 61 |
| Model 2 | 0.98 | 22 | 33 |
| Model 3 | 0.98 | 23 | 25 |
| Model 4 | 0.95 | 65 | 54 |
| Model 5 | 0.92 | 47 | 154 |
| Model 6 | 0.94 | 83 | 78 |
KyrasTuner Code Reference: https://keras.io/keras_tuner/
#Clearing backend
from tensorflow.keras import backend
backend.clear_session()
#Fixing the seed for random number generators
np.random.seed(42)
import random
random.seed(42)
tf.random.set_seed(42)
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_images, train_labels, test_size=0.2, random_state=42)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Using ImageDataGenerator to generate images
# your code here
#def add_noise(img):
# VARIABILITY = 0.1 # customize this
# deviation = VARIABILITY*np.random.random()
# noise = np.random.normal(0, deviation, img.shape)
# img += noise
# #img = np.clip(img, 0., 255.)
# return img
datagen_aug = ImageDataGenerator(
#rescale=1./255,
# customize these and other parameters
rotation_range=45,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0,
zoom_range=0,
#preprocessing_function=add_noise,
horizontal_flip=True,
fill_mode='nearest',
)
val_datagen = ImageDataGenerator()
# Flowing training images using train_datagen generator
train_generator = datagen_aug.flow(x = X_train, y = y_train, batch_size=64, seed=42, shuffle=True)
# Flowing validation images using val_datagen generator
val_generator = val_datagen.flow(x= X_val, y = y_val, batch_size=64, seed=42, shuffle=True)
#Creating an iterable for images and labels from the training data
images, labels = next(train_generator)
pip install keras-tuner --upgrade
Collecting keras-tuner
Downloading keras_tuner-1.1.2-py3-none-any.whl (133 kB)
|████████████████████████████████| 133 kB 5.1 MB/s
Collecting kt-legacy
Downloading kt_legacy-1.0.4-py3-none-any.whl (9.6 kB)
Requirement already satisfied: tensorboard in /usr/local/lib/python3.7/dist-packages (from keras-tuner) (2.8.0)
Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from keras-tuner) (2.23.0)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from keras-tuner) (1.21.6)
Requirement already satisfied: ipython in /usr/local/lib/python3.7/dist-packages (from keras-tuner) (5.5.0)
Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from keras-tuner) (21.3)
Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner) (5.1.1)
Requirement already satisfied: pygments in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner) (2.6.1)
Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner) (0.8.1)
Requirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner) (0.7.5)
Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner) (1.0.18)
Requirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner) (4.4.2)
Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner) (57.4.0)
Requirement already satisfied: pexpect in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner) (4.8.0)
Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython->keras-tuner) (1.15.0)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython->keras-tuner) (0.2.5)
Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->keras-tuner) (3.0.8)
Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.7/dist-packages (from pexpect->ipython->keras-tuner) (0.7.0)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->keras-tuner) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->keras-tuner) (1.24.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->keras-tuner) (2021.10.8)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->keras-tuner) (2.10)
Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (0.6.1)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (3.3.6)
Requirement already satisfied: grpcio>=1.24.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (1.44.0)
Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (1.0.0)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (0.4.6)
Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (3.17.3)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (1.8.1)
Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (0.37.1)
Requirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (1.35.0)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner) (1.0.1)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard->keras-tuner) (4.2.4)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard->keras-tuner) (4.8)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard->keras-tuner) (0.2.8)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard->keras-tuner) (1.3.1)
Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard->keras-tuner) (4.11.3)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard->keras-tuner) (3.8.0)
Requirement already satisfied: typing-extensions>=3.6.4 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard->keras-tuner) (4.1.1)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard->keras-tuner) (0.4.8)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard->keras-tuner) (3.2.0)
Installing collected packages: kt-legacy, keras-tuner
Successfully installed keras-tuner-1.1.2 kt-legacy-1.0.4
import tensorflow as tf
import kerastuner as kt
from tensorflow import keras
# Define Activation Function
activation_f = LeakyReLU(0.1)
# creating sequential model
model7 = Sequential()
# First Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding and input shape of 64*64*3
model7.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model7.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model7.add(Dropout(0.2))
#BatchNormalization layer
#model7.add(BatchNormalization())
# Flatten the output from the previous layer
model7.add(Flatten())
# Hidden Layer 1
model7.add(Dense(512, activation=activation_f))
model7.add(Dropout(0.2))
# Output layer with nodes equal to the number of classes and softmax activation
model7.add(Dropout(0.1))
model7.add(Dense(2,activation="softmax")) #2 represent output layer neurons
# Define Optimizer
adam = optimizers.Adam(learning_rate=0.001)
# Compile the Model
model7.compile(loss="binary_crossentropy", optimizer=adam, metrics = ['accuracy'])
# Number of epochs
NUM_EPOCHS = 20
# Callbacks help in saving the checkpoints and stopping at an accuracy where the model does not seem to improve
callbacks = [EarlyStopping(monitor='val_loss', patience=2),
ModelCheckpoint('.mdl_wts.hdf5', monitor='val_loss', save_best_only=True)]
history7 = model7.fit(train_generator,
validation_data=val_generator,
batch_size=32,callbacks=callbacks,
epochs=NUM_EPOCHS,verbose=1)
Epoch 1/20 312/312 [==============================] - 26s 80ms/step - loss: 0.6668 - accuracy: 0.6585 - val_loss: 0.6488 - val_accuracy: 0.6226 Epoch 2/20 312/312 [==============================] - 21s 67ms/step - loss: 0.5620 - accuracy: 0.7279 - val_loss: 0.5362 - val_accuracy: 0.7500 Epoch 3/20 312/312 [==============================] - 21s 67ms/step - loss: 0.5235 - accuracy: 0.7542 - val_loss: 0.4882 - val_accuracy: 0.7718 Epoch 4/20 312/312 [==============================] - 21s 66ms/step - loss: 0.4968 - accuracy: 0.7733 - val_loss: 0.5582 - val_accuracy: 0.6609 Epoch 5/20 312/312 [==============================] - 22s 69ms/step - loss: 0.4421 - accuracy: 0.8093 - val_loss: 0.4666 - val_accuracy: 0.7770 Epoch 6/20 312/312 [==============================] - 21s 67ms/step - loss: 0.4244 - accuracy: 0.8179 - val_loss: 0.3084 - val_accuracy: 0.8722 Epoch 7/20 312/312 [==============================] - 21s 66ms/step - loss: 0.3781 - accuracy: 0.8437 - val_loss: 0.4419 - val_accuracy: 0.7740 Epoch 8/20 312/312 [==============================] - 21s 67ms/step - loss: 0.3502 - accuracy: 0.8578 - val_loss: 0.2760 - val_accuracy: 0.8940 Epoch 9/20 312/312 [==============================] - 20s 65ms/step - loss: 0.3387 - accuracy: 0.8601 - val_loss: 0.2911 - val_accuracy: 0.8810 Epoch 10/20 312/312 [==============================] - 21s 66ms/step - loss: 0.3157 - accuracy: 0.8760 - val_loss: 0.2154 - val_accuracy: 0.9040 Epoch 11/20 312/312 [==============================] - 20s 66ms/step - loss: 0.3058 - accuracy: 0.8803 - val_loss: 0.3294 - val_accuracy: 0.8646 Epoch 12/20 312/312 [==============================] - 21s 67ms/step - loss: 0.2941 - accuracy: 0.8832 - val_loss: 0.2040 - val_accuracy: 0.9161 Epoch 13/20 312/312 [==============================] - 21s 67ms/step - loss: 0.2797 - accuracy: 0.8918 - val_loss: 0.2026 - val_accuracy: 0.9211 Epoch 14/20 312/312 [==============================] - 21s 68ms/step - loss: 0.2651 - accuracy: 0.8997 - val_loss: 0.1705 - val_accuracy: 0.9395 Epoch 15/20 312/312 [==============================] - 20s 65ms/step - loss: 0.2581 - accuracy: 0.9032 - val_loss: 0.2073 - val_accuracy: 0.9185 Epoch 16/20 312/312 [==============================] - 21s 66ms/step - loss: 0.2681 - accuracy: 0.8980 - val_loss: 0.1774 - val_accuracy: 0.9279
import pandas as pd
def evaluate_model(model, test_images, test_labels):
eval_dict = model.evaluate(test_images, test_labels, return_dict=True)
display_df = pd.DataFrame([eval_dict.values()], columns=[list(eval_dict.keys())])
return display_df
# Evaluate model on test set and add results to dataframe
results = evaluate_model(model7, test_images, test_labels)
# Set index to 'Baseline'
results.index = ['Baseline']
# Display results
results.head()
82/82 [==============================] - 0s 4ms/step - loss: 0.2326 - accuracy: 0.9004
| loss | accuracy | |
|---|---|---|
| Baseline | 0.232612 | 0.900385 |
def build_model(hp):
# Define Activation Function
activation_f = LeakyReLU(0.1)
# creating sequential model
model8 = keras.Sequential()
# First Convolutional layer with 32 filters and kernel size of 2. Use the 'same' padding and input shape of 64*64*3
model8.add(Conv2D(32, (3,3), input_shape=(64, 64, 3), padding='same', activation=activation_f))
# max-pooling layer with a pool size of 2
model8.add(MaxPooling2D(pool_size=2))
# Add dropout to randomly switch off 20% neurons to reduce overfitting
model8.add(Dropout(0.2))
#BatchNormalization layer
#model8.add(BatchNormalization())
# Flatten the output from the previous layer
model8.add(Flatten())
# Hidden Layers:
# Tune the number of hidden layers and units in each.
# Number of hidden layers: 1 - 5
# Number of Units: 32 - 512 with stepsize of 32
for i in range(1, hp.Int("num_layers", 2, 6)):
model8.add(
keras.layers.Dense(
units=hp.Int("units_" + str(i), min_value=32, max_value=512, step=32),
activation="relu")
)
# Tune dropout layer with values from 0 - 0.3 with stepsize of 0.1.
model8.add(keras.layers.Dropout(hp.Float("dropout_" + str(i), 0, 0.3, step=0.1)))
# Add output layer.
model8.add(keras.layers.Dense(units=2, activation="softmax"))
# Tune learning rate for Adam optimizer with values from 0.01, 0.001, or 0.0001
hp_learning_rate = hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4])
model8.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
loss="binary_crossentropy",
metrics=["accuracy"])
return model8
tuner = kt.RandomSearch(build_model,
objective="val_accuracy",
max_trials=7
)
# Display search space summary
tuner.search(train_generator,validation_data=val_generator, epochs=5)
best_model = tuner.get_best_models()[0]
Trial 7 Complete [00h 01m 43s] val_accuracy: 0.510817289352417 Best val_accuracy So Far: 0.8553686141967773 Total elapsed time: 00h 12m 41s INFO:tensorflow:Oracle triggered exit
# Get the optimal hyperparameters from the results
best_hps=tuner.get_best_hyperparameters()[0]
# Build model
model9 = tuner.hypermodel.build(best_hps)
# Train the hypertuned model
history9 = model9.fit(train_generator, validation_data=val_generator, epochs=NUM_EPOCHS, callbacks=callbacks, verbose=2)
Epoch 1/20 312/312 - 20s - loss: 0.6351 - accuracy: 0.6529 - val_loss: 0.5922 - val_accuracy: 0.6903 - 20s/epoch - 63ms/step Epoch 2/20 312/312 - 19s - loss: 0.5653 - accuracy: 0.7172 - val_loss: 0.6286 - val_accuracy: 0.6276 - 19s/epoch - 61ms/step Epoch 3/20 312/312 - 19s - loss: 0.5172 - accuracy: 0.7551 - val_loss: 0.5309 - val_accuracy: 0.7175 - 19s/epoch - 60ms/step Epoch 4/20 312/312 - 19s - loss: 0.4779 - accuracy: 0.7744 - val_loss: 0.3957 - val_accuracy: 0.8169 - 19s/epoch - 61ms/step Epoch 5/20 312/312 - 19s - loss: 0.3960 - accuracy: 0.8207 - val_loss: 0.5835 - val_accuracy: 0.6328 - 19s/epoch - 60ms/step Epoch 6/20 312/312 - 19s - loss: 0.3310 - accuracy: 0.8590 - val_loss: 0.2612 - val_accuracy: 0.8916 - 19s/epoch - 60ms/step Epoch 7/20 312/312 - 19s - loss: 0.2879 - accuracy: 0.8833 - val_loss: 0.2278 - val_accuracy: 0.9091 - 19s/epoch - 61ms/step Epoch 8/20 312/312 - 19s - loss: 0.2779 - accuracy: 0.8874 - val_loss: 0.2179 - val_accuracy: 0.9243 - 19s/epoch - 60ms/step Epoch 9/20 312/312 - 19s - loss: 0.2573 - accuracy: 0.8973 - val_loss: 0.2791 - val_accuracy: 0.9024 - 19s/epoch - 60ms/step Epoch 10/20 312/312 - 19s - loss: 0.2476 - accuracy: 0.9039 - val_loss: 0.1853 - val_accuracy: 0.9337 - 19s/epoch - 61ms/step Epoch 11/20 312/312 - 19s - loss: 0.2430 - accuracy: 0.9053 - val_loss: 0.1974 - val_accuracy: 0.9323 - 19s/epoch - 60ms/step Epoch 12/20 312/312 - 19s - loss: 0.2370 - accuracy: 0.9094 - val_loss: 0.1687 - val_accuracy: 0.9335 - 19s/epoch - 61ms/step Epoch 13/20 312/312 - 19s - loss: 0.2348 - accuracy: 0.9092 - val_loss: 0.1987 - val_accuracy: 0.9415 - 19s/epoch - 61ms/step Epoch 14/20 312/312 - 19s - loss: 0.2266 - accuracy: 0.9120 - val_loss: 0.1772 - val_accuracy: 0.9257 - 19s/epoch - 60ms/step
# Evaluate model on test set
hyper_df = evaluate_model(model9, test_images, test_labels)
# Set index to hypertuned
hyper_df.index = ["Hypertuned"]
# Append results in dataframe
results.append(hyper_df)
82/82 [==============================] - 0s 3ms/step - loss: 0.2256 - accuracy: 0.9008
| loss | accuracy | |
|---|---|---|
| Baseline | 0.232612 | 0.900385 |
| Hypertuned | 0.225622 | 0.900769 |
# Evaluating the model on test data
# plotting the accuracies
plot_accuracy(history9)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
pred9 = model9.predict(test_images)
pred9 = np.argmax(pred9,axis = 1)
y_true9 = np.argmax(test_labels,axis = 1)
#Printing the classification report
print(classification_report(y_true9,pred9))
#Plotting the heatmap using confusion matrix
cm9 = confusion_matrix(y_true9,pred9)
plt.figure(figsize=(8,5))
sns.heatmap(cm9, annot=True, fmt='.0f', xticklabels=['Uninfected', 'Parasitized'], yticklabels=['Uninfected', 'Parasitized'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
precision recall f1-score support
0 0.97 0.83 0.89 1300
1 0.85 0.97 0.91 1300
accuracy 0.90 2600
macro avg 0.91 0.90 0.90 2600
weighted avg 0.91 0.90 0.90 2600
Observations: Models 7
| Test Accuracy | False negative out of 1,300 |
False Positive out of 1,300 |
|
|---|---|---|---|
| Base | 0.97 | 41 | 28 |
| Model 1 | 0.95 | 65 | 61 |
| Model 2 | 0.98 | 22 | 33 |
| Model 3 | 0.98 | 23 | 25 |
| Model 4 | 0.95 | 65 | 54 |
| Model 5 | 0.92 | 47 | 154 |
| Model 6 | 0.94 | 83 | 78 |
| Model 7 | 0.90 | 37 | 221 |
Technical
Business